From fddb7eb4f40726d02622724f9f1968691f05d9b9 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Sat, 18 Jan 2025 10:17:32 -0800 Subject: [PATCH 01/16] Fixed dead links in pypi and makie it more searchable (#38983) * Fixed dead links in pypi and makie it more searchable * fix analysis --- sdk/ai/azure-ai-projects/README.md | 4 ++-- sdk/ai/azure-ai-projects/setup.py | 34 ++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 23cd64f1c1fd..ef25aea1ff40 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1,4 +1,4 @@ - + # Azure AI Projects client library for Python Use the AI Projects client library (in preview) to: @@ -68,7 +68,7 @@ For example, get the inference endpoint URL and credentials associated with your - [Reporting issues](#reporting-issues) - [Next steps](#next-steps) - [Contributing](#contributing) - + ## Getting started ### Prerequisite diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index 59bfc828a65c..23a8df9c3603 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -15,6 +15,16 @@ PACKAGE_NAME = "azure-ai-projects" PACKAGE_PPRINT_NAME = "Azure AI Projects" +PIPY_LONG_DESCRIPTION_BEGIN = "" +PIPY_LONG_DESCRIPTION_END = "" +LINKS_DIVIDER = "" + +GITHUB_URL = f"https://aka.ms/azsdk/azure-ai-projects/python/code" + +# Define the regular expression pattern to match links in the format [section name](#section_header) +pattern = re.compile(r'\[([^\]]+)\]\(#([^\)]+)\)') + + # a-b-c => a/b/c package_folder_path = PACKAGE_NAME.replace("-", "/") @@ -26,17 +36,36 @@ raise RuntimeError("Cannot find version information") +long_description = "" + +# When you click the links in the Table of Content which has the format of {URL/#section_header}, you are supposed to be redirected to the section header. 
+# However, this is not supported when the README is rendered in pypi.org. The README doesn't render with id={section_header} in HTML. +# To resolve this broken link, we make the long description to have top of the README content, the Table of Content, and the links at the bottom of the README +# And replace the links in Table of Content to redirect to github.com. +with open("README.md", "r") as f: + readme_content = f.read() + start_index = readme_content.find(PIPY_LONG_DESCRIPTION_BEGIN) + len(PIPY_LONG_DESCRIPTION_BEGIN) + end_index = readme_content.find(PIPY_LONG_DESCRIPTION_END) + long_description = readme_content[start_index:end_index].strip() + long_description = long_description.replace("{{package_name}}", PACKAGE_PPRINT_NAME) + long_description = re.sub(pattern, rf'[\1]({GITHUB_URL})', long_description) + links_index = readme_content.find(LINKS_DIVIDER) + long_description += "\n\n" + readme_content[links_index:].strip() + +with open("CHANGELOG.md", "r") as f: + long_description += "\n\n" + f.read() + setup( name=PACKAGE_NAME, version=version, description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), - long_description=open("README.md", "r").read(), + long_description=long_description, long_description_content_type="text/markdown", license="MIT License", author="Microsoft Corporation", author_email="azpysdkhelp@microsoft.com", url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects", - keywords="azure, azure sdk", + keywords="azure sdk, azure, ai, agents, foundry, inference, chat completion, project, evaluation", classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python", @@ -49,6 +78,7 @@ "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "License :: OSI Approved :: MIT License", + "Topic :: Scientific/Engineering :: Artificial Intelligence", ], zip_safe=False, packages=find_packages( From 5e7b5d72bda580b2660c7d1c3c90cdacd40f8aba Mon Sep 17 00:00:00 
2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Wed, 22 Jan 2025 16:44:04 -0600 Subject: [PATCH 02/16] multiagent updates (#39349) * multiagent updates * fix spelling error * adding missing file --------- Co-authored-by: Marko Hietala --- .../samples/agents/multiagent/agent_team.py | 230 ++++++++++++------ .../agents/multiagent/agent_team_config.yaml | 15 +- .../multiagent/agent_trace_configurator.py | 60 +++++ .../multiagent/sample_agents_agent_team.py | 58 +++-- ...le_agents_agent_team_custom_team_leader.py | 114 +++++++++ .../sample_agents_multi_agent_team.py | 132 +++++----- .../multiagent/user_functions_with_traces.py | 110 +++++++++ 7 files changed, 551 insertions(+), 168 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_trace_configurator.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team.py index 29fa311b7ec9..2414555bf272 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team.py @@ -4,9 +4,17 @@ # ------------------------------------ import os import yaml # type: ignore -from typing import Any, Dict, Optional, Set, List +from azure.core.settings import settings # type: ignore + +from opentelemetry import trace +from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import +from opentelemetry.trace import Span +from azure.core.tracing import AbstractSpan +from typing import Any, Dict, Optional, Set, Tuple, List from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import FunctionTool, ToolSet, MessageRole, Agent +from azure.ai.projects.models 
import FunctionTool, ToolSet, MessageRole, Agent, AgentThread + +tracer = trace.get_tracer(__name__) class _AgentTeamMember: @@ -32,7 +40,7 @@ def __init__( self.can_delegate = can_delegate -class _AgentTask: +class AgentTask: """ Encapsulates a task for an agent to perform. @@ -57,11 +65,13 @@ class AgentTeam: _teams: Dict[str, "AgentTeam"] = {} _project_client: AIProjectClient - _thread_id: str = "" + _agent_thread: Optional[AgentThread] = None _team_leader: Optional[_AgentTeamMember] = None _members: List[_AgentTeamMember] = [] - _tasks: List[_AgentTask] = [] + _tasks: List[AgentTask] = [] _team_name: str = "" + _current_request_span: Optional[Span] = None + _current_task_span: Optional[Span] = None def __init__(self, team_name: str, project_client: AIProjectClient): """ @@ -146,7 +156,26 @@ def add_agent( ) self._members.append(member) - def _add_task(self, task: _AgentTask) -> None: + def set_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: + """ + Set the team leader for this AgentTeam. + + If team leader has not been set prior to the call to assemble_team, + then a default team leader will be set. + + :param model: The model name (e.g. GPT-4) for the agent. + :param name: The name of the team leader. + :param instructions: The instructions for the team leader. These instructions + are not modified by the implementation, so all required + information about other team members and how to pass tasks + to them should be included. + :param toolset: An optional ToolSet to configure specific tools (functions, etc.) + for the team leader. + """ + member = _AgentTeamMember(model=model, name=name, instructions=instructions, toolset=toolset) + self._team_leader = member + + def add_task(self, task: AgentTask) -> None: """ Add a new task to the team's task list. 
@@ -154,31 +183,13 @@ def _add_task(self, task: _AgentTask) -> None: """ self._tasks.append(task) - def create_team_leader(self, model: str, name: str, instructions: str, toolset: Optional[ToolSet] = None) -> None: + def _create_team_leader(self) -> None: """ - Add a new agent (team member) to this AgentTeam. - - If team leader has not been created prior to the call to assemble_team, - then default team leader will be created automatically. - - :param model: The model name (e.g. GPT-4) for the agent. - :param name: The name of the team leader agent being. - :param instructions: The initial instructions/personality for the agent. - :param toolset: An optional ToolSet to configure specific tools (functions, etc.) for the agent. + Create the team leader agent. """ assert self._project_client is not None, "project_client must not be None" - assert self._team_leader is None, "team leader has already been created" - # List all agents (will be empty at this moment if you haven't added any, or you can append after they're added) - for member in self._members: - instructions += f"- {member.name}: {member.instructions}\n" + assert self._team_leader is not None, "team leader has not been added" - self._team_leader = _AgentTeamMember( - model=model, - name=name, - instructions=instructions, - toolset=toolset, - can_delegate=True, - ) self._team_leader.agent_instance = self._project_client.agents.create_agent( model=self._team_leader.model, name=self._team_leader.name, @@ -186,18 +197,23 @@ def create_team_leader(self, model: str, name: str, instructions: str, toolset: toolset=self._team_leader.toolset, ) - def _create_team_leader(self): + def _set_default_team_leader(self): """ - Create and initialize the default 'TeamLeader' agent with awareness of all other agents. + Set the default 'TeamLeader' agent with awareness of all other agents. 
""" toolset = ToolSet() toolset.add(default_function_tool) instructions = self.TEAM_LEADER_INSTRUCTIONS.format(agent_name="TeamLeader", team_name=self.team_name) + "\n" - self.create_team_leader( + # List all agents (will be empty at this moment if you haven't added any, or you can append after they're added) + for member in self._members: + instructions += f"- {member.name}: {member.instructions}\n" + + self._team_leader = _AgentTeamMember( model=self.TEAM_LEADER_MODEL, name="TeamLeader", instructions=instructions, toolset=toolset, + can_delegate=True, ) def assemble_team(self): @@ -208,7 +224,9 @@ def assemble_team(self): assert self._project_client is not None, "project_client must not be None" if self._team_leader is None: - self._create_team_leader() + self._set_default_team_leader() + + self._create_team_leader() for member in self._members: if member is self._team_leader: @@ -252,6 +270,16 @@ def dismantle_team(self) -> None: self._project_client.agents.delete_agent(member.agent_instance.id) AgentTeam._remove_team(self.team_name) + def _add_task_completion_event( + self, + span: Span, + result: str, + ) -> None: + + attributes: Dict[str, Any] = {} + attributes["agent_team.task.result"] = result + span.add_event(name=f"agent_team.task_completed", attributes=attributes) + def process_request(self, request: str) -> None: """ Handle a user's request by creating a team and delegating tasks to @@ -261,43 +289,69 @@ def process_request(self, request: str) -> None: """ assert self._project_client is not None, "project client must not be None" assert self._team_leader is not None, "team leader must not be None" - thread = self._project_client.agents.create_thread() - print(f"Created thread with ID: {thread.id}") - self._thread_id = thread.id - team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) - self._add_task(_AgentTask(self._team_leader.name, team_leader_request, "user")) - while self._tasks: - task = self._tasks.pop(0) - print( 
- f"Starting task for agent '{task.recipient}'. " - f"Requestor: '{task.requestor}'. " - f"Task description: '{task.task_description}'." - ) - message = self._project_client.agents.create_message( - thread_id=self._thread_id, - role="user", - content=task.task_description, + + if self._agent_thread is None: + self._agent_thread = self._project_client.agents.create_thread() + print(f"Created thread with ID: {self._agent_thread.id}") + + with tracer.start_as_current_span("agent_team_request") as current_request_span: + self._current_request_span = current_request_span + if self._current_request_span is not None: + self._current_request_span.set_attribute("agent_team.name", self.team_name) + team_leader_request = self.TEAM_LEADER_INITIAL_REQUEST.format(original_request=request) + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", ) - print(f"Created message with ID: {message.id} for task in thread {self._thread_id}") - agent = self._get_member_by_name(task.recipient) - if agent and agent.agent_instance: - run = self._project_client.agents.create_and_process_run( - thread_id=self._thread_id, assistant_id=agent.agent_instance.id - ) - print(f"Created and processed run for agent '{agent.name}', run ID: {run.id}") - messages = self._project_client.agents.list_messages(thread_id=self._thread_id) - text_message = messages.get_last_text_message_by_role(role=MessageRole.AGENT) - if text_message and text_message.text: - print(f"Agent '{agent.name}' completed task. " f"Outcome: {text_message.text.value}") - - # If no tasks remain AND the recipient is not the TeamLeader, - # let the TeamLeader see if more delegation is needed. 
- if not self._tasks and not task.recipient == "TeamLeader": - team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS - task = _AgentTask( - recipient=self._team_leader.name, task_description=team_leader_request, requestor="user" - ) - self._add_task(task) + while self._tasks: + task = self._tasks.pop(0) + with tracer.start_as_current_span("agent_team_task") as current_task_span: + self._current_task_span = current_task_span + if self._current_task_span is not None: + self._current_task_span.set_attribute("agent_team.name", self.team_name) + self._current_task_span.set_attribute("agent_team.task.recipient", task.recipient) + self._current_task_span.set_attribute("agent_team.task.requestor", task.requestor) + self._current_task_span.set_attribute("agent_team.task.description", task.task_description) + print( + f"Starting task for agent '{task.recipient}'. " + f"Requestor: '{task.requestor}'. " + f"Task description: '{task.task_description}'." + ) + message = self._project_client.agents.create_message( + thread_id=self._agent_thread.id, + role="user", + content=task.task_description, + ) + print(f"Created message with ID: {message.id} for task in thread {self._agent_thread.id}") + agent = self._get_member_by_name(task.recipient) + if agent and agent.agent_instance: + run = self._project_client.agents.create_and_process_run( + thread_id=self._agent_thread.id, assistant_id=agent.agent_instance.id + ) + print(f"Created and processed run for agent '{agent.name}', run ID: {run.id}") + messages = self._project_client.agents.list_messages(thread_id=self._agent_thread.id) + text_message = messages.get_last_text_message_by_role(role=MessageRole.AGENT) + if text_message and text_message.text: + print(f"Agent '{agent.name}' completed task. 
" f"Outcome: {text_message.text.value}") + if self._current_task_span is not None: + self._add_task_completion_event(self._current_task_span, result=text_message.text.value) + + # If no tasks remain AND the recipient is not the TeamLeader, + # let the TeamLeader see if more delegation is needed. + if not self._tasks and not task.recipient == "TeamLeader": + team_leader_request = self.TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS + _create_task( + team_name=self.team_name, + recipient=self._team_leader.name, + request=team_leader_request, + requestor="user", + ) + # self._current_task_span.end() + self._current_task_span = None + # self._current_request_span.end() + self._current_request_span = None def _get_member_by_name(self, name) -> Optional[_AgentTeamMember]: """ @@ -313,6 +367,34 @@ def _get_member_by_name(self, name) -> Optional[_AgentTeamMember]: return member return None + """ + Requests another agent in the team to complete a task. + + :param span (Span): The event will be added to this span + :param team_name (str): The name of the team. + :param recipient (str): The name of the agent that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the agent who is requesting the task. + :return: True if the task was successfully received, False otherwise. 
+ :rtype: str + """ + + +def _add_create_task_event( + span: Span, + team_name: str, + requestor: str, + recipient: str, + request: str, +) -> None: + + attributes: Dict[str, Any] = {} + attributes["agent_team.task.team_name"] = team_name + attributes["agent_team.task.requestor"] = requestor + attributes["agent_team.task.recipient"] = recipient + attributes["agent_team.task.description"] = request + span.add_event(name=f"agent_team.create_task", attributes=attributes) + def _create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: """ @@ -325,14 +407,24 @@ def _create_task(team_name: str, recipient: str, request: str, requestor: str) - :return: True if the task was successfully received, False otherwise. :rtype: str """ - task = _AgentTask(recipient=recipient, task_description=request, requestor=requestor) + task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) team: Optional[AgentTeam] = None try: team = AgentTeam.get_team(team_name) + span: Optional[Span] = None + if team._current_task_span is not None: + span = team._current_task_span + elif team._current_request_span is not None: + span = team._current_request_span + + if span is not None: + _add_create_task_event( + span=span, team_name=team_name, requestor=requestor, recipient=recipient, request=request + ) except: pass if team is not None: - team._add_task(task) + team.add_task(task) return "True" return "False" diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team_config.yaml b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team_config.yaml index 61579bac8997..7249cfa50971 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team_config.yaml +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_team_config.yaml @@ -5,20 +5,21 @@ TEAM_LEADER_INSTRUCTIONS: | You are an agent named '{agent_name}'. You are a leader of a team of agents. The name of your team is '{team_name}'. 
You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. - You will use the provided create_task function to create a task for the agent that is best suited for handling the task next. + You will use the provided _create_task function to create a task for the agent that is best suited for handling the task next. You will respond with the description of who you assigned the task and why. When you think that the original user request is processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. Using the skills of all the team members when applicable is highly valued. + Do not create parallel tasks. Here are the other agents in your team: TEAM_LEADER_INITIAL_REQUEST: | Please create a task for agent in the team that is best suited to next process the following request. - Use the create_task function available for you to create the task. The request is: - f"{original_request}" + Use the _create_task function available for you to create the task. The request is: + {original_request} TEAM_LEADER_TASK_COMPLETENESS_CHECK_INSTRUCTIONS: | - Check the discussion so far and especially the most recent message in the thread. - If you see a potential task that could improve the final outcome, then use the create_task function to create the task. + Check the discussion so far and especially the most recent agent response in the thread and if you see a potential task + that could improve the final outcome, then use the _create_task function to create the task. Do not ever ask user confirmation for creating a task. If the request is completely processed, you do not have to create a task. @@ -26,7 +27,7 @@ TEAM_MEMBER_CAN_DELEGATE_INSTRUCTIONS: | You are an agent named '{name}'. You are a member in a team of agents. 
The name of your team is '{team_name}'. {original_instructions} - - You can delegate tasks when appropriate. To delegate, call the create_task function, using your own name as the 'requestor'. + - You can delegate tasks when appropriate. To delegate, call the _create_task function, using your own name as the 'requestor'. - Provide a brief account of any tasks you assign and the outcome. - Ask for help from other team members if you see they have the relevant expertise. - Once you believe your assignment is complete, respond with your final answer or actions taken. @@ -37,6 +38,6 @@ TEAM_MEMBER_NO_DELEGATE_INSTRUCTIONS: | {original_instructions} - You do not delegate tasks. Instead, focus solely on fulfilling the tasks assigned to you. - - If you have suggestions for tasks better suited to another agent, simply mention it in your response, but do not call create_task yourself. + - If you have suggestions for tasks better suited to another agent, simply mention it in your response, but do not call _create_task yourself. - Once you believe your assignment is complete, respond with your final answer or actions taken. - Below are the other agents in your team: {team_description} diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_trace_configurator.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_trace_configurator.py new file mode 100644 index 000000000000..31dd6f7cb6b6 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/agent_trace_configurator.py @@ -0,0 +1,60 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import os +import sys +from typing import cast +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter +from azure.ai.projects import AIProjectClient +from azure.monitor.opentelemetry import configure_azure_monitor + + +class AgentTraceConfigurator: + def __init__(self, project_client: AIProjectClient): + self.project_client = project_client + + def enable_azure_monitor_tracing(self): + application_insights_connection_string = self.project_client.telemetry.get_connection_string() + if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Foundry project page.") + exit() + configure_azure_monitor(connection_string=application_insights_connection_string) + self.project_client.telemetry.enable() + + def enable_console_tracing_without_genai(self): + exporter = ConsoleSpanExporter() + trace.set_tracer_provider(TracerProvider()) + tracer = trace.get_tracer(__name__) + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(exporter)) + print("Console tracing enabled without agent traces.") + + def enable_console_tracing_with_agent(self): + self.project_client.telemetry.enable(destination=sys.stdout) + print("Console tracing enabled with agent traces.") + + def display_menu(self): + print("Select a tracing option:") + print("1. Enable Azure Monitor tracing") + print("2. Enable console tracing without enabling gen_ai agent traces") + print("3. Enable console tracing with gen_ai agent traces") + print("4. 
Do not enable traces") + + def setup_tracing(self): + self.display_menu() + choice = input("Enter your choice (1-4): ") + + if choice == "1": + self.enable_azure_monitor_tracing() + elif choice == "2": + self.enable_console_tracing_without_genai() + elif choice == "3": + self.enable_console_tracing_with_agent() + elif choice == "4": + print("No tracing enabled.") + else: + print("Invalid choice. Please select a valid option.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team.py index a1abd0245e13..184a7ef20ca5 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team.py @@ -5,7 +5,7 @@ """ DESCRIPTION: - This sample demonstrates how to multiple agents using AgentTeam. + This sample demonstrates how to use multiple agents using AgentTeam with traces. USAGE: python sample_agents_agent_team.py @@ -14,39 +14,49 @@ pip install azure-ai-projects azure-identity - Set this environment variables with your own values: + Set these environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. """ import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from agent_team import AgentTeam +from agent_trace_configurator import AgentTraceConfigurator project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -with project_client: - agent_team = AgentTeam("test_team", project_client=project_client) - agent_team.add_agent( - model="gpt-4-1106-preview", - name="Coder", - instructions="You are software engineer who writes great code. 
Your name is Coder.", - ) - agent_team.add_agent( - model="gpt-4-1106-preview", - name="Reviewer", - instructions="You are software engineer who reviews code. Your name is Reviewer.", - ) - agent_team.assemble_team() - - print("A team of agents specialized in software engineering is available for requests.") - while True: - user_input = input("Input (type 'quit' to exit): ") - if user_input.lower() == "quit": - break - agent_team.process_request(request=user_input) - - agent_team.dismantle_team() +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + +if model_deployment_name is not None: + AgentTraceConfigurator(project_client=project_client).setup_tracing() + with project_client: + agent_team = AgentTeam("test_team", project_client=project_client) + agent_team.add_agent( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + agent_team.add_agent( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. 
Your name is Reviewer.", + ) + agent_team.assemble_team() + + print("A team of agents specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + agent_team.process_request(request=user_input) + + agent_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py new file mode 100644 index 000000000000..d2cbca871ae2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_agent_team_custom_team_leader.py @@ -0,0 +1,114 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to multiple agents using AgentTeam with traces. + +USAGE: + python sample_agents_agent_team_custom_team_leader.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
+""" + +import os +from typing import Optional, Set +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from agent_team import AgentTeam, AgentTask +from agent_trace_configurator import AgentTraceConfigurator +from azure.ai.projects.models import FunctionTool, ToolSet + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + + +def create_task(team_name: str, recipient: str, request: str, requestor: str) -> str: + """ + Requests another agent in the team to complete a task. + + :param team_name (str): The name of the team. + :param recipient (str): The name of the agent that is being requested to complete the task. + :param request (str): A description of the to complete. This can also be a question. + :param requestor (str): The name of the agent who is requesting the task. + :return: True if the task was successfully received, False otherwise. + :rtype: str + """ + task = AgentTask(recipient=recipient, task_description=request, requestor=requestor) + team: Optional[AgentTeam] = None + try: + team = AgentTeam.get_team(team_name) + except: + pass + if team is not None: + team.add_task(task) + return "True" + return "False" + + +# Any additional functions that might be used by the agents: +agent_team_default_functions: Set = { + create_task, +} + +default_function_tool = FunctionTool(functions=agent_team_default_functions) + + +if model_deployment_name is not None: + AgentTraceConfigurator(project_client=project_client).setup_tracing() + with project_client: + agent_team = AgentTeam("test_team", project_client=project_client) + toolset = ToolSet() + toolset.add(default_function_tool) + agent_team.set_team_leader( + model=model_deployment_name, + name="TeamLeader", + instructions="You are an agent named 'TeamLeader'. You are a leader of a team of agents. 
The name of your team is 'test_team'." + "You are an agent that is responsible for receiving requests from user and utilizing a team of agents to complete the task. " + "When you are passed a request, the only thing you will do is evaluate which team member should do which task next to complete the request. " + "You will use the provided create_task function to create a task for the agent that is best suited for handling the task next. " + "You will respond with the description of who you assigned the task and why. When you think that the original user request is " + "processed completely utilizing all the talent available in the team, you do not have to create anymore tasks. " + "Using the skills of all the team members when applicable is highly valued. " + "Do not create parallel tasks. " + "Here are the other agents in your team: " + "- Coder: You are software engineer who writes great code. Your name is Coder. " + "- Reviewer: You are software engineer who reviews code. Your name is Reviewer.", + toolset=toolset, + ) + agent_team.add_agent( + model=model_deployment_name, + name="Coder", + instructions="You are software engineer who writes great code. Your name is Coder.", + ) + agent_team.add_agent( + model=model_deployment_name, + name="Reviewer", + instructions="You are software engineer who reviews code. 
Your name is Reviewer.", + ) + agent_team.assemble_team() + + print("A team of agents specialized in software engineering is available for requests.") + while True: + user_input = input("Input (type 'quit' or 'exit' to exit): ") + if user_input.lower() == "quit": + break + elif user_input.lower() == "exit": + break + agent_team.process_request(request=user_input) + + agent_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py index 5f98b577d05b..501ef6fb0335 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py @@ -14,22 +14,20 @@ pip install azure-ai-projects azure-identity - Set this environment variables with your own values: + Set these environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Foundry project. + MODEL_DEPLOYMENT_NAME - the name of the model deployment to use. 
""" -import os, sys +import os -# Get the parent directory -parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -# Add the parent directory to the system path -sys.path.append(parent_dir) from typing import Set -from user_functions import * +from user_functions_with_traces import * from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ToolSet, FunctionTool from azure.identity import DefaultAzureCredential from agent_team import AgentTeam +from agent_trace_configurator import AgentTraceConfigurator project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), @@ -38,67 +36,65 @@ user_function_set_1: Set = {fetch_current_datetime, fetch_weather} -user_function_set_2: Set = { - send_email_using_recipient_name, - calculate_sum, - toggle_flag, - merge_dicts, - get_user_info, - longest_word_in_sentences, - process_records, -} +user_function_set_2: Set = {send_email_using_recipient_name} user_function_set_3: Set = {convert_temperature} -with project_client: - - functions = FunctionTool(functions=user_function_set_1) - toolset1 = ToolSet() - toolset1.add(functions) - - agent_team = AgentTeam("test_team", project_client=project_client) - - agent_team.add_agent( - model="gpt-4-1106-preview", - name="TimeWeatherAgent", - instructions="You are a specialized agent for time and weather queries.", - toolset=toolset1, - can_delegate=True, - ) - - functions = FunctionTool(functions=user_function_set_2) - toolset2 = ToolSet() - toolset2.add(functions) - - agent_team.add_agent( - model="gpt-4-1106-preview", - name="SendEmailAgent", - instructions="You are a specialized agent for sending emails.", - toolset=toolset2, - can_delegate=False, - ) - - functions = FunctionTool(functions=user_function_set_3) - toolset3 = ToolSet() - toolset3.add(functions) - - agent_team.add_agent( - model="gpt-4-1106-preview", - name="TemperatureAgent", - instructions="You are a specialized agent for temperature 
conversion.", - toolset=toolset3, - can_delegate=False, - ) - - agent_team.assemble_team() - - user_request = ( - "Hello, Please provide me current time in '2023-%m-%d %H:%M:%S' format, and the weather in New York. " - "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." - ) - - # Once process_request is called, the TeamLeader will coordinate. - # The loop in process_request will pick up tasks from the queue, assign them, and so on. - agent_team.process_request(request=user_request) - - agent_team.dismantle_team() +model_deployment_name = os.getenv("MODEL_DEPLOYMENT_NAME") + +if model_deployment_name is not None: + AgentTraceConfigurator(project_client=project_client).setup_tracing() + with project_client: + + functions = FunctionTool(functions=user_function_set_1) + toolset1 = ToolSet() + toolset1.add(functions) + + agent_team = AgentTeam("test_team", project_client=project_client) + + agent_team.add_agent( + model=model_deployment_name, + name="TimeWeatherAgent", + instructions="You are a specialized agent for time and weather queries.", + toolset=toolset1, + can_delegate=True, + ) + + functions = FunctionTool(functions=user_function_set_2) + toolset2 = ToolSet() + toolset2.add(functions) + + agent_team.add_agent( + model=model_deployment_name, + name="SendEmailAgent", + instructions="You are a specialized agent for sending emails.", + toolset=toolset2, + can_delegate=False, + ) + + functions = FunctionTool(functions=user_function_set_3) + toolset3 = ToolSet() + toolset3.add(functions) + + agent_team.add_agent( + model=model_deployment_name, + name="TemperatureAgent", + instructions="You are a specialized agent for temperature conversion.", + toolset=toolset3, + can_delegate=False, + ) + + agent_team.assemble_team() + + user_request = ( + "Hello, Please provide me current time in '2023-%m-%d %H:%M:%S' format, and the weather in New York. 
" + "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." + ) + + # Once process_request is called, the TeamLeader will coordinate. + # The loop in process_request will pick up tasks from the queue, assign them, and so on. + agent_team.process_request(request=user_request) + + agent_team.dismantle_team() +else: + print("Error: Please define the environment variable MODEL_DEPLOYMENT_NAME.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py new file mode 100644 index 000000000000..1a4910b19d83 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/user_functions_with_traces.py @@ -0,0 +1,110 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime +from typing import Any, Callable, Set, Optional +from opentelemetry import trace + + +tracer = trace.get_tracer(__name__) + + +# These are the user-defined functions that can be called by the agent. +@tracer.start_as_current_span("fetch_current_datetime") # type: ignore +def fetch_current_datetime(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. 
+ :rtype: str + """ + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + +@tracer.start_as_current_span("fetch_weather") # type: ignore +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +@tracer.start_as_current_span("send_email_using_recipient_name") # type: ignore +def send_email_using_recipient_name(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Name of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +@tracer.start_as_current_span("convert_temperature") # type: ignore +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. 
+ :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9 / 5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +# Example User Input for Each Function +# 1. Fetch Current DateTime +# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" + +# 2. Fetch Weather +# User Input: "Can you provide the weather information for New York?" + +# 3. Send Email Using Recipient Name +# User Input: "Send an email to John Doe with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" + +# 4. Convert Temperature +# User Input: "Convert 25 degrees Celsius to Fahrenheit." + + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email_using_recipient_name, + convert_temperature, +} From aa3e318cc818a6543dbb11a8747768a19a150dd7 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Wed, 22 Jan 2025 15:49:32 -0800 Subject: [PATCH 03/16] Comply with the new API version (#39328) * Comply with the new API version * Fix * Fix linter * Fix linter * Fix --- .../azure/ai/projects/_serialization.py | 70 +---- .../azure/ai/projects/aio/_patch.py | 40 +-- .../ai/projects/aio/operations/_operations.py | 49 ++-- .../ai/projects/aio/operations/_patch.py | 40 ++- .../azure/ai/projects/models/_models.py | 249 ++++++++++++++++-- .../ai/projects/operations/_operations.py | 50 ++-- ...e_agents_vector_store_file_search_async.py | 18 +- .../sample_agents_vector_store_file_search.py | 17 +- .../tests/agents/test_agents_client.py | 13 +- .../tests/agents/test_agents_client_async.py | 12 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 11 files changed, 367 insertions(+), 193 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py 
index b24ab2885450..670738f0789c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py @@ -185,73 +185,7 @@ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], except NameError: _long_type = int - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0. - - :param datetime.datetime dt: The datetime - :returns: The offset - :rtype: datetime.timedelta - """ - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation. - - :param datetime.datetime dt: The datetime - :returns: The timestamp representation - :rtype: str - """ - return "Z" - - def dst(self, dt): - """No daylight saving for UTC. - - :param datetime.datetime dt: The datetime - :returns: The daylight saving time - :rtype: datetime.timedelta - """ - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset) -> None: - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore +TZ_UTC = datetime.timezone.utc _FLATTEN = re.compile(r"(? 
Dict[str, str]: ] # Add all objects you want publicly available to users at this package level -class _SyncCredentialWrapper(TokenCredential): - """ - The class, synchronizing AsyncTokenCredential. - - :param async_credential: The async credential to be synchronized. - :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential - """ - - def __init__(self, async_credential: "AsyncTokenCredential"): - self._async_credential = async_credential - - def get_token( - self, - *scopes: str, - claims: Optional[str] = None, - tenant_id: Optional[str] = None, - enable_cae: bool = False, - **kwargs: Any, - ) -> "AccessToken": - - pool = concurrent.futures.ThreadPoolExecutor() - return pool.submit( - asyncio.run, - self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs, - ), - ).result() - - def patch_sdk(): """Do not remove from this file. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 5d0049d7db93..0b0b50eeac06 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -12,6 +12,7 @@ from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload import urllib.parse +from azure.core import AsyncPipelineClient from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -31,6 +32,7 @@ from ... 
import _model_base, models as _models from ..._model_base import SdkJSONEncoder, _deserialize +from ..._serialization import Deserializer, Serializer from ..._vendor import FileType, prepare_multipart_form_data from ...operations._operations import ( build_agents_cancel_run_request, @@ -87,6 +89,7 @@ build_evaluations_update_request, build_telemetry_get_app_insights_request, ) +from .._configuration import AIProjectClientConfiguration if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -113,10 +116,10 @@ class AgentsOperations: # pylint: disable=too-many-public-methods def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: @@ -4362,7 +4365,7 @@ async def create_vector_store_file( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -4375,8 +4378,8 @@ async def create_vector_store_file( :paramtype content_type: str :keyword 
file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4410,7 +4413,7 @@ async def create_vector_store_file( body: Union[JSON, IO[bytes]] = _Unset, *, file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -4422,8 +4425,8 @@ async def create_vector_store_file( :type body: JSON or IO[bytes] :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4446,7 +4449,7 @@ async def create_vector_store_file( cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: - body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_id": file_id} + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -5060,10 +5063,10 @@ class ConnectionsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceResponse: @@ -5392,10 +5395,10 @@ class TelemetryOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if 
input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def _get_app_insights( @@ -5483,10 +5486,10 @@ class EvaluationsOperations: def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index bd400a00ee4a..2e749fb5efea 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -8,6 +8,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import asyncio +import concurrent.futures import io import logging import os @@ -29,6 +30,7 @@ overload, ) +from azure.core.credentials import TokenCredential from azure.core.exceptions import ResourceNotFoundError from 
azure.core.tracing.decorator_async import distributed_trace_async @@ -54,6 +56,8 @@ from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient, ImageEmbeddingsClient from azure.ai.projects import _types + from azure.core.credentials import AccessToken + from azure.core.credentials_async import AsyncTokenCredential logger = logging.getLogger(__name__) @@ -497,10 +501,11 @@ async def get( from ...models._patch import SASTokenCredential cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) + sync_credential = _SyncCredentialWrapper(self._config.credential) token_credential = SASTokenCredential( sas_token=cred_prop.credentials.sas, - credential=self._config.credential, + credential=sync_credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, project_name=self._config.project_name, @@ -3038,6 +3043,39 @@ async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentD return await super().delete_agent(assistant_id, **kwargs) +class _SyncCredentialWrapper(TokenCredential): + """ + The class, synchronizing AsyncTokenCredential. + + :param async_credential: The async credential to be synchronized. 
+ :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + + def __init__(self, async_credential: "AsyncTokenCredential"): + self._async_credential = async_credential + + def get_token( + self, + *scopes: str, + claims: Optional[str] = None, + tenant_id: Optional[str] = None, + enable_cae: bool = False, + **kwargs: Any, + ) -> "AccessToken": + + pool = concurrent.futures.ThreadPoolExecutor() + return pool.submit( + asyncio.run, + self._async_credential.get_token( + *scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs, + ), + ).result() + + __all__: List[str] = [ "AgentsOperations", "ConnectionsOperations", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 999da2f12c81..6e6edb150dcf 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -233,7 +233,7 @@ class AgentsNamedToolChoice(_model_base.Model): """ type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() - """the type of tool. If type is ``function``\\ , the function name must be set. Required. Known + """the type of tool. If type is ``function``\ , the function name must be set. Required. Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", \"fabric_aiskill\", \"sharepoint_grounding\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() @@ -383,6 +383,23 @@ class AppInsightsProperties(_model_base.Model): connection_string: str = rest_field(name="ConnectionString") """Authentication type of the connection target. Required.""" + @overload + def __init__( + self, + *, + connection_string: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class InputData(_model_base.Model): """Abstract data class for input data configuration. @@ -430,7 +447,7 @@ class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): :vartype resource_id: str :ivar query: Query to fetch the data. Required. :vartype query: str - :ivar service_name: Service name. + :ivar service_name: Service name. Required. :vartype service_name: str :ivar connection_string: Connection String to connect to ApplicationInsights. :vartype connection_string: str @@ -442,8 +459,8 @@ class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" query: str = rest_field() """Query to fetch the data. Required.""" - service_name: Optional[str] = rest_field(name="serviceName") - """Service name.""" + service_name: str = rest_field(name="serviceName") + """Service name. Required.""" connection_string: Optional[str] = rest_field(name="connectionString") """Connection String to connect to ApplicationInsights.""" @@ -453,7 +470,7 @@ def __init__( *, resource_id: str, query: str, - service_name: Optional[str] = None, + service_name: str, connection_string: Optional[str] = None, ) -> None: ... @@ -828,6 +845,23 @@ class CredentialsApiKeyAuth(_model_base.Model): key: str = rest_field() """The API key. Required.""" + @overload + def __init__( + self, + *, + key: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class CredentialsSASAuth(_model_base.Model): """The credentials needed for Shared Access Signatures (SAS) authentication. 
@@ -840,6 +874,23 @@ class CredentialsSASAuth(_model_base.Model): sas: str = rest_field(name="SAS") """The Shared Access Signatures (SAS) token. Required.""" + @overload + def __init__( + self, + *, + sas: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class Trigger(_model_base.Model): """Abstract data class for input data configuration. @@ -1522,6 +1573,25 @@ class GetAppInsightsResponse(_model_base.Model): properties: "_models._models.AppInsightsProperties" = rest_field() """The properties of the resource. Required.""" + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + name: str, + properties: "_models._models.AppInsightsProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class GetConnectionResponse(_model_base.Model): """Response from the listSecrets operation. @@ -1542,6 +1612,25 @@ class GetConnectionResponse(_model_base.Model): properties: "_models._models.InternalConnectionProperties" = rest_field() """The properties of the resource. Required.""" + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + name: str, + properties: "_models._models.InternalConnectionProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class GetWorkspaceResponse(_model_base.Model): """Response from the Workspace - Get operation. @@ -1562,6 +1651,25 @@ class GetWorkspaceResponse(_model_base.Model): properties: "_models._models.WorkspaceProperties" = rest_field() """The properties of the resource. Required.""" + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + name: str, + properties: "_models._models.WorkspaceProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class IncompleteRunDetails(_model_base.Model): """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. @@ -1659,6 +1767,25 @@ class InternalConnectionProperties(_model_base.Model): target: str = rest_field() """The connection URL to be used for this service. Required.""" + @overload + def __init__( + self, + *, + auth_type: str, + category: Union[str, "_models.ConnectionType"], + target: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discriminator="AAD"): """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ @@ -1679,6 +1806,24 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi """Authentication type of the connection target. Required. 
Entra ID authentication (formerly known as AAD)""" + @overload + def __init__( + self, + *, + category: Union[str, "_models.ConnectionType"], + target: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, auth_type=AuthenticationType.ENTRA_ID, **kwargs) + class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): """Connection properties for connections with API key authentication. @@ -1700,6 +1845,25 @@ class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discr credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" + @overload + def __init__( + self, + *, + category: Union[str, "_models.ConnectionType"], + target: str, + credentials: "_models._models.CredentialsApiKeyAuth", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, auth_type=AuthenticationType.API_KEY, **kwargs) + class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): """Connection properties for connections with SAS authentication. @@ -1723,6 +1887,25 @@ class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discrimi credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" + @overload + def __init__( + self, + *, + category: Union[str, "_models.ConnectionType"], + target: str, + credentials: "_models._models.CredentialsSASAuth", + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, auth_type=AuthenticationType.SAS, **kwargs) + class ListConnectionsResponse(_model_base.Model): """Response from the list operation. @@ -1735,6 +1918,23 @@ class ListConnectionsResponse(_model_base.Model): value: List["_models._models.GetConnectionResponse"] = rest_field() """A list of connection list secrets. Required.""" + @overload + def __init__( + self, + *, + value: List["_models._models.GetConnectionResponse"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class MessageAttachment(_model_base.Model): """This describes to which tools a file has been attached. @@ -4303,7 +4503,7 @@ class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: """The input into the Code Interpreter tool call.""" outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these are represented + items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented by a different object type.""" @@ -5578,9 +5778,9 @@ class ThreadMessageOptions(_model_base.Model): """The role of the entity that is creating the message. Allowed values include: - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. 
- * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert messages from the agent into the conversation. Required. Known values are: \"user\" and \"assistant\".""" content: str = rest_field() @@ -5741,7 +5941,7 @@ class ThreadRun(_model_base.Model): """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.""" usage: "_models.RunCompletionUsage" = rest_field() """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). Required.""" + terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" temperature: Optional[float] = rest_field() """The sampling temperature used for this run. If not set, defaults to 1.""" top_p: Optional[float] = rest_field() @@ -5978,9 +6178,9 @@ class TruncationObject(_model_base.Model): type: Union[str, "_models.TruncationStrategy"] = rest_field() """The truncation strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``\\ , the thread will + ``last_messages``\ , the thread will be truncated to the ``lastMessages`` count most recent messages in the thread. When set to - ``auto``\\ , messages in the middle of the thread + ``auto``\ , messages in the middle of the thread will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known values are: \"auto\" and \"last_messages\".""" last_messages: Optional[int] = rest_field() @@ -6162,7 +6362,7 @@ class VectorStore(_model_base.Model): file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. 
Required.""" status: Union[str, "_models.VectorStoreStatus"] = rest_field() - """The status of the vector store, which can be either ``expired``\\ , ``in_progress``\\ , or + """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or ``completed``. A status of ``completed`` indicates that the vector store is ready for use. Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() @@ -6553,8 +6753,8 @@ class VectorStoreFile(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. Required.""" status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``\\ , ``completed``\\ , - ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the vector store file + """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , + ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and \"cancelled\".""" last_error: "_models.VectorStoreFileError" = rest_field() @@ -6623,8 +6823,8 @@ class VectorStoreFileBatch(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. Required.""" status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``\\ , - ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + """The status of the vector store files batch, which can be either ``in_progress``\ , + ``completed``\ , ``cancelled`` or ``failed``. Required. 
Known values are: \"in_progress\", \"completed\", \"cancelled\", and \"failed\".""" file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. Required.""" @@ -6898,3 +7098,20 @@ class WorkspaceProperties(_model_base.Model): application_insights: str = rest_field(name="applicationInsights") """Authentication type of the connection target. Required.""" + + @overload + def __init__( + self, + *, + application_insights: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 88fce5b81450..4126d8a21ef8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -12,6 +12,7 @@ from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload import urllib.parse +from azure.core import PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -29,8 +30,9 @@ from azure.core.utils import case_insensitive_dict from .. 
import _model_base, models as _models +from .._configuration import AIProjectClientConfiguration from .._model_base import SdkJSONEncoder, _deserialize -from .._serialization import Serializer +from .._serialization import Deserializer, Serializer from .._vendor import FileType, prepare_multipart_form_data if sys.version_info >= (3, 9): @@ -1525,10 +1527,10 @@ class AgentsOperations: # pylint: disable=too-many-public-methods def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: @@ -5772,7 +5774,7 @@ def create_vector_store_file( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -5785,8 +5787,8 @@ def create_vector_store_file( :paramtype content_type: str :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. 
- :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -5820,7 +5822,7 @@ def create_vector_store_file( body: Union[JSON, IO[bytes]] = _Unset, *, file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -5832,8 +5834,8 @@ def create_vector_store_file( :type body: JSON or IO[bytes] :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -5856,7 +5858,7 @@ def create_vector_store_file( cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: - body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_id": file_id} + body = {"chunking_strategy": chunking_strategy, "data_source": data_source, "file_id": file_id} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -6470,10 +6472,10 @@ class ConnectionsOperations: def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceResponse: @@ -6802,10 +6804,10 @@ class TelemetryOperations: def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + 
self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def _get_app_insights( @@ -6893,10 +6895,10 @@ class EvaluationsOperations: def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: AIProjectClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def get(self, id: str, **kwargs: Any) -> _models.Evaluation: diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py index 7a3409820d68..4f177af6126b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py @@ -20,7 +20,11 @@ import os from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import FileSearchTool, FilePurpose +from azure.ai.projects.models import ( + FileSearchTool, + FilePurpose, + MessageTextContent +) from azure.identity.aio import 
DefaultAzureCredential @@ -37,7 +41,7 @@ async def main(): # Create a vector store with no file and wait for it to be processed vector_store = await project_client.agents.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" + file_ids=[file.id], name="sample_vector_store" ) print(f"Created vector store, vector store ID: {vector_store.id}") @@ -72,8 +76,14 @@ async def main(): print("Deleted agent") messages = await project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") - + + for message in reversed(messages.data): + # To remove characters, which are not correctly handled by print, we will encode the message + # and then decode it again. + clean_message = "\n".join( + text_msg.text.value.encode('ascii', 'ignore').decode('utf-8') for text_msg in message.text_messages + ) + print(f"Role: {message.role} Message: {clean_message}") if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py index f832642bb92e..7f415fd2344d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py @@ -22,7 +22,11 @@ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import FileSearchTool, FilePurpose +from azure.ai.projects.models import ( + FileSearchTool, + FilePurpose, + MessageTextContent +) from azure.identity import DefaultAzureCredential project_client = AIProjectClient.from_connection_string( @@ -36,7 +40,7 @@ print(f"Uploaded file, file ID: {file.id}") # Create a vector store with no file and wait for it to be processed - vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = 
project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") # Create a file search tool @@ -70,4 +74,11 @@ print("Deleted agent") messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") + + for message in reversed(messages.data): + # To remove characters, which are not correctly handled by print, we will encode the message + # and then decode it again. + clean_message = "\n".join( + text_msg.text.value.encode('ascii', 'ignore').decode('utf-8') for text_msg in message.text_messages + ) + print(f"Role: {message.role} Message: {clean_message}") diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index ab135a0c2928..6fed0a59ba6b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -2370,7 +2370,6 @@ def test_create_vector_store_add_file_file_id(self, **kwargs): self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs) @agentClientPreparer() - # @pytest.markp("The CreateVectorStoreFile API is not supported yet.") @pytest.mark.skip("Not deployed in all regions.") @recorded_by_proxy def test_create_vector_store_add_file_azure(self, **kwargs): @@ -2387,16 +2386,14 @@ def _do_test_create_vector_store_add_file(self, **kwargs): if file_id: ds = None else: - ds = [ - VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_projects_agents_tests_data_path"], - asset_type="uri_asset", - ) - ] + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_projects_agents_tests_data_path"], + asset_type="uri_asset", + ) vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id vector_store_file = ai_client.agents.create_vector_store_file( - 
vector_store_id=vector_store.id, data_sources=ds, file_id=file_id + vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id self._test_file_search(ai_client, vector_store, file_id) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index 3d2f11a7f68b..7aa41ac4a53b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -2263,16 +2263,14 @@ async def _do_test_create_vector_store_add_file(self, **kwargs): if file_id: ds = None else: - ds = [ - VectorStoreDataSource( - asset_identifier=kwargs["azure_ai_projects_agents_tests_data_path"], - asset_type=VectorStoreDataSourceAssetType.URI_ASSET, - ) - ] + ds = VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_projects_agents_tests_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id vector_store_file = await ai_client.agents.create_vector_store_file( - vector_store_id=vector_store.id, data_sources=ds, file_id=file_id + vector_store_id=vector_store.id, data_source=ds, file_id=file_id ) assert vector_store_file.id await self._test_file_search(ai_client, vector_store, file_id) diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 7a46a66db865..f05c76ce3b88 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 6b49333fc5c2a10c6f1b4b88b13ecce50a2b2af1 +commit: 84f3f74acf7fd611b4cfc8235792264f4e832300 repo: Azure/azure-rest-api-specs additionalDirectories: From a857adc0b6067174e167762c3df3aafeab6b305c Mon Sep 17 00:00:00 2001 From: Darren Cohen 
<39422044+dargilco@users.noreply.github.com> Date: Thu, 23 Jan 2025 08:54:32 -0800 Subject: [PATCH 04/16] Set default Application-ID in "user-agent" HTTP request header, for azure-ai-inference clients you get from azure-ai-project client (#39348) --- sdk/ai/azure-ai-projects/README.md | 7 +- sdk/ai/azure-ai-projects/assets.json | 2 +- .../azure/ai/projects/_patch.py | 4 +- .../azure/ai/projects/aio/_patch.py | 4 +- .../ai/projects/aio/operations/_patch.py | 81 +++++++++++++----- .../azure/ai/projects/operations/_patch.py | 85 +++++++++++++------ ...e_agents_vector_store_file_search_async.py | 11 +-- .../sample_agents_vector_store_file_search.py | 10 +-- sdk/ai/azure-ai-projects/setup.py | 10 +-- .../tests/inference/inference_test_base.py | 11 +++ .../tests/inference/test_inference.py | 38 +++++++-- .../tests/inference/test_inference_async.py | 30 +++++-- 12 files changed, 213 insertions(+), 80 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index ef25aea1ff40..b787c222ab83 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -17,6 +17,10 @@ For example, get the inference endpoint URL and credentials associated with your | [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code) | [AI Starter Template](https://aka.ms/azsdk/azure-ai-projects/python/ai-starter-template) +## Reporting issues + +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-projects" in the title or content. 
+ ## Table of contents - [Getting started](#getting-started) @@ -1192,7 +1196,8 @@ For more information, see [Configure logging in the Azure libraries for Python]( ### Reporting issues -To report issues with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues) +To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues). Mention the package name "azure-ai-projects" in the title or content. + ## Next steps diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 56404b36f900..2483d0b9f212 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_40731b58e1" + "Tag": "python/ai/azure-ai-projects_55ba14b3a7" } diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 226dcf4e0ce9..9bc0729de4da 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -9,7 +9,7 @@ import uuid from os import PathLike from pathlib import Path -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Dict, List, Tuple, Union, Optional from typing_extensions import Self from azure.core import PipelineClient @@ -56,6 +56,8 @@ def __init__( # pylint: disable=super-init-not-called,too-many-statements kwargs2 = kwargs.copy() kwargs3 = kwargs.copy() + self._user_agent: Optional[str] = kwargs.get("user_agent", None) + # For getting AppInsights connection string from the AppInsights resource. # The AppInsights resource URL is not known at this point. 
We need to get it from the # AzureML "Workspace - Get" REST API call. It will have the form: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 3398a18545dd..61b035c5bec7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -9,7 +9,7 @@ import uuid from os import PathLike from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from typing_extensions import Self from azure.core import AsyncPipelineClient @@ -64,6 +64,8 @@ def __init__( # pylint: disable=super-init-not-called,too-many-statements kwargs2 = kwargs.copy() kwargs3 = kwargs.copy() + self._user_agent: Optional[str] = kwargs.get("user_agent", None) + # For getting AppInsights connection string from the AppInsights resource. # The AppInsights resource URL is not known at this point. We need to get it from the # AzureML "Workspace - Get" REST API call. It will have the form: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 2e749fb5efea..3bc909516929 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -68,6 +68,19 @@ class InferenceOperations: def __init__(self, outer_instance): + + # All returned inference clients will have this application id set on their user-agent. 
+ # For more info on user-agent HTTP header, see: + # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy + USER_AGENT_APP_ID = "AIProjectClient" + + if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent: + # If the calling application has set "user_agent" when constructing the AIProjectClient, + # take that value and prepend it to USER_AGENT_APP_ID. + self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}" + else: + self._user_agent = USER_AGENT_APP_ID + self._outer_instance = outer_instance @distributed_trace_async @@ -76,7 +89,8 @@ async def get_chat_completions_client( ) -> "ChatCompletionsClient": """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + ChatCompletionsClient. At least one AI model that supports chat completions must be deployed in this resource. 
@@ -107,16 +121,16 @@ async def get_chat_completions_client( if connection_name: connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs + connection_name=connection_name, include_credentials=True ) else: if use_serverless_connection: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection)) @@ -142,14 +156,23 @@ async def get_chat_completions_client( ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_chat_completions_client]" + " Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( @@ -168,7 +191,8 @@ async def get_chat_completions_client( async def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> 
"EmbeddingsClient": """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + EmbeddingsClient. At least one AI model that supports text embeddings must be deployed in this resource. @@ -199,16 +223,16 @@ async def get_embeddings_client(self, *, connection_name: Optional[str] = None, if connection_name: connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs + connection_name=connection_name, include_credentials=True ) else: if use_serverless_connection: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) @@ -233,13 +257,22 @@ async def get_embeddings_client(self, *, connection_name: Optional[str] = None, ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) + client = EmbeddingsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) client = 
EmbeddingsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( @@ -257,7 +290,8 @@ async def get_image_embeddings_client( ) -> "ImageEmbeddingsClient": """Get an authenticated asynchronous ImageEmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + ImageEmbeddingsClient. At least one AI model that supports image embeddings must be deployed in this resource. @@ -288,16 +322,16 @@ async def get_image_embeddings_client( if connection_name: connection = await self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs + connection_name=connection_name, include_credentials=True ) else: if use_serverless_connection: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) @@ -323,14 +357,23 @@ async def get_image_embeddings_client( ) from azure.core.credentials import AzureKeyCredential - client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) 
+ client = ImageEmbeddingsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_image_embeddings_client] " "Creating ImageEmbeddingsClient using Entra ID authentication" ) client = ImageEmbeddingsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( @@ -501,7 +544,7 @@ async def get( from ...models._patch import SASTokenCredential cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) - sync_credential = _SyncCredentialWrapper(self._config.credential) + sync_credential = _SyncCredentialWrapper(self._config.credential) token_credential = SASTokenCredential( sas_token=cred_prop.credentials.sas, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 43a91509e65f..4a9d5b2b8a7f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -68,6 +68,19 @@ class InferenceOperations: def __init__(self, outer_instance): + + # All returned inference clients will have this application id set on their user-agent. 
+ # For more info on user-agent HTTP header, see: + # https://azure.github.io/azure-sdk/general_azurecore.html#telemetry-policy + USER_AGENT_APP_ID = "AIProjectClient" + + if hasattr(outer_instance, "_user_agent") and outer_instance._user_agent: + # If the calling application has set "user_agent" when constructing the AIProjectClient, + # take that value and prepend it to USER_AGENT_APP_ID. + self._user_agent = f"{outer_instance._user_agent}-{USER_AGENT_APP_ID}" + else: + self._user_agent = USER_AGENT_APP_ID + self._outer_instance = outer_instance @distributed_trace @@ -76,7 +89,8 @@ def get_chat_completions_client( ) -> "ChatCompletionsClient": """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + ChatCompletionsClient. At least one AI model that supports chat completions must be deployed in this resource. 
@@ -106,17 +120,15 @@ def get_chat_completions_client( use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if connection_name: - connection = self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) + connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) else: if use_serverless_connection: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_chat_completions_client] connection = %s", str(connection)) @@ -142,14 +154,23 @@ def get_chat_completions_client( ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_chat_completions_client] " + "Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( @@ -168,7 +189,8 @@ def get_chat_completions_client( 
def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwargs) -> "EmbeddingsClient": """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + EmbeddingsClient. At least one AI model that supports text embeddings must be deployed in this resource. @@ -198,17 +220,15 @@ def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwar use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if connection_name: - connection = self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) + connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) else: if use_serverless_connection: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) @@ -233,13 +253,22 @@ def get_embeddings_client(self, *, connection_name: Optional[str] = None, **kwar ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) + client = EmbeddingsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + 
**kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) client = EmbeddingsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( @@ -257,7 +286,8 @@ def get_image_embeddings_client( ) -> "ImageEmbeddingsClient": """Get an authenticated ImageEmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource (if `connection_name` is not specificed), or from the Azure AI - Services resource given by its connection name. + Services resource given by its connection name. Keyword arguments are passed to the constructor of + ImageEmbeddingsClient. At least one AI model that supports image embeddings must be deployed in this resource. 
@@ -287,17 +317,15 @@ def get_image_embeddings_client( use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if connection_name: - connection = self._outer_instance.connections.get( - connection_name=connection_name, include_credentials=True, **kwargs - ) + connection = self._outer_instance.connections.get(connection_name=connection_name, include_credentials=True) else: if use_serverless_connection: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True ) else: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True ) logger.debug("[InferenceOperations.get_embeddings_client] connection = %s", str(connection)) @@ -323,14 +351,23 @@ def get_image_embeddings_client( ) from azure.core.credentials import AzureKeyCredential - client = ImageEmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) + client = ImageEmbeddingsClient( + endpoint=endpoint, + credential=AzureKeyCredential(connection.key), + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: logger.debug( "[InferenceOperations.get_image_embeddings_client] " "Creating ImageEmbeddingsClient using Entra ID authentication" ) client = ImageEmbeddingsClient( - endpoint=endpoint, credential=connection.token_credential, credential_scopes=credential_scopes + endpoint=endpoint, + credential=connection.token_credential, + credential_scopes=credential_scopes, + user_agent=kwargs.pop("user_agent", self._user_agent), + **kwargs, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py index 4f177af6126b..bb39337f5896 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py @@ -20,11 +20,7 @@ import os from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import ( - FileSearchTool, - FilePurpose, - MessageTextContent -) +from azure.ai.projects.models import FileSearchTool, FilePurpose, MessageTextContent from azure.identity.aio import DefaultAzureCredential @@ -76,14 +72,15 @@ async def main(): print("Deleted agent") messages = await project_client.agents.list_messages(thread_id=thread.id) - + for message in reversed(messages.data): # To remove characters, which are not correctly handled by print, we will encode the message # and then decode it again. 
clean_message = "\n".join( - text_msg.text.value.encode('ascii', 'ignore').decode('utf-8') for text_msg in message.text_messages + text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages ) print(f"Role: {message.role} Message: {clean_message}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py index 7f415fd2344d..4c871b657f53 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py @@ -22,11 +22,7 @@ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import ( - FileSearchTool, - FilePurpose, - MessageTextContent -) +from azure.ai.projects.models import FileSearchTool, FilePurpose, MessageTextContent from azure.identity import DefaultAzureCredential project_client = AIProjectClient.from_connection_string( @@ -74,11 +70,11 @@ print("Deleted agent") messages = project_client.agents.list_messages(thread_id=thread.id) - + for message in reversed(messages.data): # To remove characters, which are not correctly handled by print, we will encode the message # and then decode it again. 
clean_message = "\n".join( - text_msg.text.value.encode('ascii', 'ignore').decode('utf-8') for text_msg in message.text_messages + text_msg.text.value.encode("ascii", "ignore").decode("utf-8") for text_msg in message.text_messages ) print(f"Role: {message.role} Message: {clean_message}") diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index 23a8df9c3603..3c806ef0e972 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -22,7 +22,7 @@ GITHUB_URL = f"https://aka.ms/azsdk/azure-ai-projects/python/code" # Define the regular expression pattern to match links in the format [section name](#section_header) -pattern = re.compile(r'\[([^\]]+)\]\(#([^\)]+)\)') +pattern = re.compile(r"\[([^\]]+)\]\(#([^\)]+)\)") # a-b-c => a/b/c @@ -48,10 +48,10 @@ end_index = readme_content.find(PIPY_LONG_DESCRIPTION_END) long_description = readme_content[start_index:end_index].strip() long_description = long_description.replace("{{package_name}}", PACKAGE_PPRINT_NAME) - long_description = re.sub(pattern, rf'[\1]({GITHUB_URL})', long_description) + long_description = re.sub(pattern, rf"[\1]({GITHUB_URL})", long_description) links_index = readme_content.find(LINKS_DIVIDER) - long_description += "\n\n" + readme_content[links_index:].strip() - + long_description += "\n\n" + readme_content[links_index:].strip() + with open("CHANGELOG.md", "r") as f: long_description += "\n\n" + f.read() @@ -65,7 +65,7 @@ author="Microsoft Corporation", author_email="azpysdkhelp@microsoft.com", url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects", - keywords="azure sdk, azure, ai, agents, foundry, inference, chat completion, project, evaluation", + keywords="azure sdk, azure, ai, agents, foundry, inference, chat completion, project, evaluation", classifiers=[ "Development Status :: 4 - Beta", "Programming Language :: Python", diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py 
b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index f64e7d097af6..f654702c6634 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -53,6 +53,7 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: credential=self.get_credential(AIProjectClient, is_async=False), conn_str=conn_str, logging_enable=LOGGING_ENABLED, + **kwargs, ) return project_client @@ -62,6 +63,7 @@ def get_async_client(self, **kwargs) -> AIProjectClientAsync: credential=self.get_credential(AIProjectClientAsync, is_async=True), conn_str=conn_str, logging_enable=LOGGING_ENABLED, + **kwargs, ) return project_client @@ -80,3 +82,12 @@ def get_image_embeddings_input(with_text: Optional[bool] = False) -> ImageEmbedd image_file=image_file, image_format="png", ) + + def validate_user_agent(self, starts_with: str) -> None: + print(f"Actual HTTP request headers: {self.pipeline_request.http_request.headers}") + headers = self.pipeline_request.http_request.headers + assert headers["User-Agent"].startswith(starts_with) + assert " Python/" in headers["User-Agent"] + + def request_callback(self, pipeline_request) -> None: + self.pipeline_request = pipeline_request diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 54105749e1cf..3860c7161917 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -100,7 +100,7 @@ def test_inference_get_aoai_client_with_nonexisting_connection_name(self, **kwar @recorded_by_proxy def test_inference_get_chat_completions_client_key_auth(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_chat_completions_model_deployment_name") - with self.get_sync_client(**kwargs) as project_client: + with self.get_sync_client(user_agent="MyAppId", **kwargs) as project_client: with 
project_client.inference.get_chat_completions_client() as chat_completions_client: response = chat_completions_client.complete( model=model, @@ -108,11 +108,13 @@ def test_inference_get_chat_completions_client_key_auth(self, **kwargs): SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], + raw_request_hook=self.request_callback, ) print("\nChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy @@ -129,11 +131,13 @@ def test_inference_get_chat_completions_client_entra_id_auth(self, **kwargs): SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], + raw_request_hook=self.request_callback, ) print("\nChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) + self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() def test_inference_get_chat_completions_client_with_empty_connection_name(self, **kwargs): @@ -164,9 +168,13 @@ def test_inference_get_chat_completions_client_with_nonexisting_connection_name( @recorded_by_proxy def test_inference_get_embeddings_client_key_auth(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_embeddings_model_deployment_name") - with self.get_sync_client(**kwargs) as project_client: + with self.get_sync_client(user_agent="MyAppId", **kwargs) as project_client: with project_client.inference.get_embeddings_client() as embeddings_client: - response = embeddings_client.embed(model=model, input=["first phrase", "second phrase", "third phrase"]) + response = embeddings_client.embed( + model=model, + 
input=["first phrase", "second phrase", "third phrase"], + raw_request_hook=self.request_callback, + ) print("\nEmbeddingsClient response:") for item in response.data: length = len(item.embedding) @@ -179,6 +187,7 @@ def test_inference_get_embeddings_client_key_auth(self, **kwargs): assert len(item.embedding) > 0 assert item.embedding[0] != 0 assert item.embedding[-1] != 0 + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy @@ -187,7 +196,11 @@ def test_inference_get_embeddings_client_entra_id_auth(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_embeddings_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: with project_client.inference.get_embeddings_client(connection_name=connection_name) as embeddings_client: - response = embeddings_client.embed(model=model, input=["first phrase", "second phrase", "third phrase"]) + response = embeddings_client.embed( + model=model, + input=["first phrase", "second phrase", "third phrase"], + raw_request_hook=self.request_callback, + ) print("\nEmbeddingsClient response:") for item in response.data: length = len(item.embedding) @@ -200,6 +213,7 @@ def test_inference_get_embeddings_client_entra_id_auth(self, **kwargs): assert len(item.embedding) > 0 assert item.embedding[0] != 0 assert item.embedding[-1] != 0 + self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() def test_inference_get_embeddings_client_with_empty_connection_name(self, **kwargs): @@ -230,9 +244,13 @@ def test_inference_get_embeddings_client_with_nonexisting_connection_name(self, @recorded_by_proxy def test_inference_get_image_embeddings_client_key_auth(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_embeddings_model_deployment_name") - with self.get_sync_client(**kwargs) as project_client: + with self.get_sync_client(user_agent="MyAppId", 
**kwargs) as project_client: with project_client.inference.get_image_embeddings_client() as embeddings_client: - response = embeddings_client.embed(model=model, input=[InferenceTestBase.get_image_embeddings_input()]) + response = embeddings_client.embed( + model=model, + input=[InferenceTestBase.get_image_embeddings_input()], + raw_request_hook=self.request_callback, + ) print("\nImageEmbeddingsClient response:") for item in response.data: length = len(item.embedding) @@ -245,6 +263,7 @@ def test_inference_get_image_embeddings_client_key_auth(self, **kwargs): assert len(response.data[0].embedding) > 0 assert response.data[0].embedding[0] != 0.0 assert response.data[0].embedding[-1] != 0.0 + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy @@ -255,7 +274,11 @@ def test_inference_get_image_embeddings_client_entra_id_auth(self, **kwargs): with project_client.inference.get_image_embeddings_client( connection_name=connection_name ) as embeddings_client: - response = embeddings_client.embed(model=model, input=[InferenceTestBase.get_image_embeddings_input()]) + response = embeddings_client.embed( + model=model, + input=[InferenceTestBase.get_image_embeddings_input()], + raw_request_hook=self.request_callback, + ) print("\nImageEmbeddingsClient response:") for item in response.data: length = len(item.embedding) @@ -268,6 +291,7 @@ def test_inference_get_image_embeddings_client_entra_id_auth(self, **kwargs): assert len(response.data[0].embedding) > 0 assert response.data[0].embedding[0] != 0.0 assert response.data[0].embedding[-1] != 0.0 + self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() def test_inference_get_image_embeddings_client_with_empty_connection_name(self, **kwargs): diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py 
b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 4b3f571881c5..11dec67b0896 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -103,7 +103,7 @@ async def test_inference_get_aoai_client_with_nonexisting_connection_name_async( @recorded_by_proxy_async async def test_inference_get_chat_completions_client_key_auth_async(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_chat_completions_model_deployment_name") - async with self.get_async_client(**kwargs) as project_client: + async with self.get_async_client(user_agent="MyAppId", **kwargs) as project_client: async with await project_client.inference.get_chat_completions_client() as chat_completions_client: response = await chat_completions_client.complete( model=model, @@ -111,11 +111,13 @@ async def test_inference_get_chat_completions_client_key_auth_async(self, **kwar SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], + raw_request_hook=self.request_callback, ) print("\nAsync ChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy_async @@ -132,11 +134,13 @@ async def test_inference_get_chat_completions_client_entra_id_auth_async(self, * SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), ], + raw_request_hook=self.request_callback, ) print("\nAsync ChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) + self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") 
@servicePreparerInferenceTests() async def test_inference_get_chat_completions_client_with_empty_connection_name_async(self, **kwargs): @@ -167,10 +171,12 @@ async def test_inference_get_chat_completions_client_with_nonexisting_connection @recorded_by_proxy_async async def test_inference_get_embeddings_client_key_auth_async(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_embeddings_model_deployment_name") - async with self.get_async_client(**kwargs) as project_client: + async with self.get_async_client(user_agent="MyAppId", **kwargs) as project_client: async with await project_client.inference.get_embeddings_client() as embeddings_client: response = await embeddings_client.embed( - model=model, input=["first phrase", "second phrase", "third phrase"] + model=model, + input=["first phrase", "second phrase", "third phrase"], + raw_request_hook=self.request_callback, ) print("\nEmbeddingsClient response:") for item in response.data: @@ -184,6 +190,7 @@ async def test_inference_get_embeddings_client_key_auth_async(self, **kwargs): assert len(item.embedding) > 0 assert item.embedding[0] != 0 assert item.embedding[-1] != 0 + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy_async @@ -195,7 +202,9 @@ async def test_inference_get_embeddings_client_entra_id_auth_async(self, **kwarg connection_name=connection_name ) as embeddings_client: response = await embeddings_client.embed( - model=model, input=["first phrase", "second phrase", "third phrase"] + model=model, + input=["first phrase", "second phrase", "third phrase"], + raw_request_hook=self.request_callback, ) print("\nEmbeddingsClient response:") for item in response.data: @@ -209,6 +218,7 @@ async def test_inference_get_embeddings_client_entra_id_auth_async(self, **kwarg assert len(item.embedding) > 0 assert item.embedding[0] != 0 assert item.embedding[-1] != 0 + 
self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() async def test_inference_get_embeddings_client_with_empty_connection_name_async(self, **kwargs): @@ -239,10 +249,12 @@ async def test_inference_get_embeddings_client_with_nonexisting_connection_name_ @recorded_by_proxy_async async def test_inference_get_image_embeddings_client_key_auth_async(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_embeddings_model_deployment_name") - async with self.get_async_client(**kwargs) as project_client: + async with self.get_async_client(user_agent="MyAppId", **kwargs) as project_client: async with await project_client.inference.get_image_embeddings_client() as embeddings_client: response = await embeddings_client.embed( - model=model, input=[InferenceTestBase.get_image_embeddings_input()] + model=model, + input=[InferenceTestBase.get_image_embeddings_input()], + raw_request_hook=self.request_callback, ) print("\nImageEmbeddingsClient response:") for item in response.data: @@ -256,6 +268,7 @@ async def test_inference_get_image_embeddings_client_key_auth_async(self, **kwar assert len(response.data[0].embedding) > 0 assert response.data[0].embedding[0] != 0.0 assert response.data[0].embedding[-1] != 0.0 + self.validate_user_agent(starts_with="MyAppId-AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() @recorded_by_proxy_async @@ -267,7 +280,9 @@ async def test_inference_get_image_embeddings_client_entra_id_auth_async(self, * connection_name=connection_name ) as embeddings_client: response = await embeddings_client.embed( - model=model, input=[InferenceTestBase.get_image_embeddings_input()] + model=model, + input=[InferenceTestBase.get_image_embeddings_input()], + raw_request_hook=self.request_callback, ) print("\nImageEmbeddingsClient response:") for item in response.data: @@ -281,6 +296,7 @@ async def test_inference_get_image_embeddings_client_entra_id_auth_async(self, * 
assert len(response.data[0].embedding) > 0 assert response.data[0].embedding[0] != 0.0 assert response.data[0].embedding[-1] != 0.0 + self.validate_user_agent(starts_with="AIProjectClient azsdk-python-ai-inference/") @servicePreparerInferenceTests() async def test_inference_get_image_embeddings_client_with_empty_connection_name_async(self, **kwargs): From 329cf829f20b69cb6fcf72d43740ffdcdf9af238 Mon Sep 17 00:00:00 2001 From: sophia-ramsey Date: Thu, 23 Jan 2025 12:47:22 -0800 Subject: [PATCH 05/16] update sample with correct year format (#39366) --- .../samples/agents/multiagent/sample_agents_multi_agent_team.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py index 501ef6fb0335..ae9c05323d22 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py +++ b/sdk/ai/azure-ai-projects/samples/agents/multiagent/sample_agents_multi_agent_team.py @@ -87,7 +87,7 @@ agent_team.assemble_team() user_request = ( - "Hello, Please provide me current time in '2023-%m-%d %H:%M:%S' format, and the weather in New York. " + "Hello, Please provide me current time in '%Y-%m-%d %H:%M:%S' format, and the weather in New York. " "Finally, convert the Celsius to Fahrenheit and send an email to Example Recipient with summary of results." 
) From 4f74e540ddd0f21d958fb184c90e3b8cd53fd6ad Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 24 Jan 2025 16:22:03 -0800 Subject: [PATCH 06/16] Update to version beta 6 --- sdk/ai/azure-ai-projects/CHANGELOG.md | 8 ++++++++ sdk/ai/azure-ai-projects/azure/ai/projects/_version.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 888d65059793..cd294dc42e03 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -1,5 +1,13 @@ # Release History +## 1.0.0b6 (Unreleased) + +### Features added + +### Bugs Fixed + +### Breaking Changes + ## 1.0.0b5 (2025-01-17) ### Features added diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py index c7d155d924dd..d17ec8abfb6f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "1.0.0b5" +VERSION = "1.0.0b6" From f2546e9cbc67c7bec5f3f2e49b069bf36f1b686b Mon Sep 17 00:00:00 2001 From: sophia-ramsey Date: Mon, 27 Jan 2025 14:56:50 -0800 Subject: [PATCH 07/16] Create sample for agents with logic apps (#39371) * Create sample for agents with logic apps * address comments * Update README and Prerequisites for samples * fix link validation issue * fix broken reference --- sdk/ai/azure-ai-projects/README.md | 64 +++++++++- .../agents/sample_agents_azure_ai_search.py | 10 +- .../agents/sample_agents_logic_apps.py | 120 ++++++++++++++++++ 3 files changed, 186 insertions(+), 8 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index b787c222ab83..500a3683fd8e 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -6,7 +6,7 @@ Use the AI Projects client library (in preview) to: * **Enumerate connections** in your Azure AI Foundry project and get connection properties. For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection. * **Get an authenticated Inference client** to do chat completions, for the default Azure OpenAI or AI Services connections in your Azure AI Foundry project. Supports the AzureOpenAI client from the `openai` package, or clients from the `azure-ai-inference` package. -* **Develop Agents using the Azure AI Agent Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agent Service enables the building of Agents for a wide range of generative AI use cases. The package is currently in private preview. 
+* **Develop Agents using the Azure AI Agent Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agent Service enables the building of Agents for a wide range of generative AI use cases. The package is currently in preview. * **Run Evaluations** to assess the performance of generative AI applications using various evaluators and metrics. It includes built-in evaluators for quality, risk, and safety, and allows custom evaluators for specific needs. * **Enable OpenTelemetry tracing**. @@ -36,7 +36,7 @@ To report an issue with the client library, or request additional features, plea - [Get properties of a connection by its connection name](#get-properties-of-a-connection-by-its-connection-name) - [Get an authenticated ChatCompletionsClient](#get-an-authenticated-chatcompletionsclient) - [Get an authenticated AzureOpenAI client](#get-an-authenticated-azureopenai-client) - - [Agents (Private Preview)](#agents-private-preview) + - [Agents (Preview)](#agents-preview) - [Create an Agent](#create-agent) with: - [File Search](#create-agent-with-file-search) - [Enterprise File Search](#create-agent-with-enterprise-file-search) @@ -244,14 +244,14 @@ print(response.choices[0].message.content) See the "inference" folder in the [package samples][samples] for additional samples. -### Agents (Private Preview) +### Agents (Preview) Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with Agents. See the "agents" folder in the [package samples][samples] for additional Agent samples. -Agents are actively being developed. A sign-up form for private preview is coming soon. 
- #### Create Agent +Before creating an Agent, you need to set up Azure resources to deploy your model. [Create a New Agent Quickstart](https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure) details selecting and deploying your Agent Setup. + Here is an example of how to create an Agent: @@ -447,7 +447,7 @@ with project_client: #### Create Agent with Azure AI Search -Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation. Creating an Agent with Azure AI Search requires an existing Azure AI Search Index. For more information and setup guides, see [Azure AI Search Tool Guide](https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search). Here is an example to integrate Azure AI Search: @@ -571,6 +571,58 @@ print(f"Created agent, agent ID: {agent.id}") +#### Create Agent With Logic Apps + +Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). +Agents SDK accesses Logic Apps through Workflow URLs, which are called as requests in functions. 
+ + + +```python +def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by triggering an Azure Logic App endpoint. + The Logic App must be configured to accept JSON with 'to', 'subject', and 'body'. + """ + if not LOGIC_APP_URL: + raise ValueError("Logic App URL is not set.") + + payload = { + "to": recipient, + "subject": subject, + "body": body + } + + response = requests.post(url=LOGIC_APP_URL, json=payload) + if response.ok: + return json.dumps({"result": "Email sent successfully."}) + else: + return json.dumps({"error": f"Error sending email request ({response.status_code}): {response.text}"}) +``` + + +Then, the Logic App and other functions can be incorporated into code using `FunctionTool` and `ToolSet`. + + +```python +functions_to_use: Set = { + fetch_current_datetime, + send_email_via_logic_app, + } + + functions = FunctionTool(functions=functions_to_use) + toolset = ToolSet() + toolset.add(functions) + + agent = project_client.agents.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="SendEmailAgent", + instructions="You are a specialized agent for sending emails.", + toolset=toolset, + ) +``` + + #### Create Agent With OpenAPI OpenAPI specifications describe REST operations against a specific endpoint. Agents SDK can read an OpenAPI spec, create a function from it, and call that function against the REST endpoint without additional client-side execution. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py index 763817b50122..b484935251d3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -7,8 +7,14 @@ DESCRIPTION: This sample demonstrates how to use agent operations with the Azure AI Search tool from the Azure Agents service using a synchronous client. 
- To learn how to set up an Azure AI Search resource, - visit https://learn.microsoft.com/azure/search/search-get-started-portal + +PREREQUISITES: + You will need an Azure AI Search Resource. + If you already have one, you must create an agent that can use an existing Azure AI Search index: + https://learn.microsoft.com/azure/ai-services/agents/how-to/tools/azure-ai-search?tabs=azurecli%2Cpython&pivots=overview-azure-ai-search + + If you do not already have an Agent Setup with an Azure AI Search resource, follow the guide for a Standard Agent setup: + https://learn.microsoft.com/azure/ai-services/agents/quickstart?pivots=programming-language-python-azure USAGE: python sample_agents_azure_ai_search.py diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py new file mode 100644 index 000000000000..b790f5890d50 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py @@ -0,0 +1,120 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use agents to execute tasks with Logic Apps. + +PREREQUISITES: + Create a Logic App configured to send emails. The Logic App must include an HTTP request trigger that is + configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow + can be found here: + https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling + +USAGE: + python sample_agents_logic_apps.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Replace with a valid email address in the message content. 
+ + Set this environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) LOGIC_APP_URL - the URL of the Logic App Workflow URL to send emails, as found in your Azure Portal. +""" + +import os, sys + +# Get the parent directory +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +# Add the parent directory to the system path +sys.path.append(parent_dir) +from typing import Set +from user_functions import fetch_current_datetime +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import ToolSet, FunctionTool +from azure.identity import DefaultAzureCredential +import requests, json + + +LOGIC_APP_URL = os.environ.get("LOGIC_APP_URL", "") + + +def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by triggering an Azure Logic App endpoint. + The Logic App must be configured to accept JSON with 'to', 'subject', and 'body'. 
+ """ + if not LOGIC_APP_URL: + raise ValueError("Logic App URL is not set.") + + payload = { + "to": recipient, + "subject": subject, + "body": body + } + + response = requests.post(url=LOGIC_APP_URL, json=payload) + if response.ok: + return json.dumps({"result": "Email sent successfully."}) + else: + return json.dumps({"error": f"Error sending email request ({response.status_code}): {response.text}"}) + + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +functions_to_use: Set = { + fetch_current_datetime, + send_email_via_logic_app, +} + +with project_client: + + functions = FunctionTool(functions=functions_to_use) + toolset = ToolSet() + toolset.add(functions) + + agent = project_client.agents.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], + name="SendEmailAgent", + instructions="You are a specialized agent for sending emails.", + toolset=toolset, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") \ No newline at end of file From 
0dbe0ee6569b136613c34dd89ba3b3dbf6d10d89 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:12:32 -0600 Subject: [PATCH 08/16] adding a sample how to add custom attributes to traces (#39445) * adding a sample how to add custom attributes to traces * updating comments * fixing tool check errors --------- Co-authored-by: Marko Hietala --- ..._with_console_tracing_custom_attributes.py | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing_custom_attributes.py diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing_custom_attributes.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing_custom_attributes.py new file mode 100644 index 000000000000..15d9ace4012a --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing_custom_attributes.py @@ -0,0 +1,107 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client with tracing to console and adding + custom attributes to the span. 
+ +USAGE: + python sample_agents_basics_with_console_tracing_custom_attributes.py + + Before running the sample: + + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc + + Set these environment variables with your own values: + 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your + Azure AI Foundry project. + 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in + the "Models + endpoints" tab in your Azure AI Foundry project. + 3) AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. +""" + +import os, sys, time +from typing import cast +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from opentelemetry import trace +from opentelemetry.sdk.trace import SpanProcessor, ReadableSpan, Span, TracerProvider + + +# Define the custom span processor that is used for adding the custom +# attributes to spans when they are started. 
+class CustomAttributeSpanProcessor(SpanProcessor): + def __init__(self): + pass + + def on_start(self, span: Span, parent_context=None): + # Add this attribute to all spans + span.set_attribute("trace_sample.sessionid", "123") + + # Add another attribute only to create_message spans + if span.name == "create_message": + span.set_attribute("trace_sample.message.context", "abc") + + def on_end(self, span: ReadableSpan): + # Clean-up logic can be added here if necessary + pass + + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# project_client.telemetry.enable(destination="http://localhost:4317") +project_client.telemetry.enable(destination=sys.stdout) + +# Add the custom span processor to the global tracer provider +provider = cast(TracerProvider, trace.get_tracer_provider()) +provider.add_span_processor(CustomAttributeSpanProcessor()) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with project_client: + agent = project_client.agents.create_agent( + model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + + # Poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # Wait for a second + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, 
run_id=run.id) + + print(f"Run status: {run.status}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"messages: {messages}") From cd61ea1d75cfb937f24ca1025b7b578cd6e73314 Mon Sep 17 00:00:00 2001 From: sophia-ramsey Date: Thu, 30 Jan 2025 13:14:46 -0800 Subject: [PATCH 09/16] update logic app sample to fetch workflow url (#39477) --- sdk/ai/azure-ai-projects/README.md | 39 +++---- .../agents/sample_agents_logic_apps.py | 109 +++++++++--------- .../samples/agents/user_logic_apps.py | 84 ++++++++++++++ 3 files changed, 159 insertions(+), 73 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 500a3683fd8e..ba5c14afcc05 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -574,30 +574,27 @@ print(f"Created agent, agent ID: {agent.id}") #### Create Agent With Logic Apps Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). -Agents SDK accesses Logic Apps through Workflow URLs, which are called as requests in functions. +Your Logic App must be in the same resource group as your Azure AI Project, shown in the Azure Portal. Agents SDK accesses Logic Apps through Workflow URLs, which are fetched and called as requests in functions. - + ```python def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: - """ - Sends an email by triggering an Azure Logic App endpoint. - The Logic App must be configured to accept JSON with 'to', 'subject', and 'body'. 
- """ - if not LOGIC_APP_URL: - raise ValueError("Logic App URL is not set.") - - payload = { - "to": recipient, - "subject": subject, - "body": body - } - - response = requests.post(url=LOGIC_APP_URL, json=payload) - if response.ok: - return json.dumps({"result": "Email sent successfully."}) - else: - return json.dumps({"error": f"Error sending email request ({response.status_code}): {response.text}"}) + """ + Sends an email by invoking the specified Logic App with the given recipient, subject, and body. + + :param recipient: The email address of the recipient. + :param subject: The subject of the email. + :param body: The body of the email. + :return: A JSON string summarizing the result of the operation. + """ + payload = { + "to": recipient, + "subject": subject, + "body": body, + } + result = service.invoke_logic_app(logic_app_name, payload) + return json.dumps(result) ``` @@ -1285,4 +1282,4 @@ additional questions or comments. [azure_sub]: https://azure.microsoft.com/free/ [evaluators]: https://learn.microsoft.com/azure/ai-studio/how-to/develop/evaluate-sdk [azure_ai_evaluation]: https://learn.microsoft.com/python/api/overview/azure/ai-evaluation-readme -[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library +[evaluator_library]: https://learn.microsoft.com/azure/ai-studio/how-to/evaluate-generative-ai-app#view-and-manage-the-evaluators-in-the-evaluator-library \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py index b790f5890d50..ccd78d3c434b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py @@ -5,13 +5,14 @@ """ DESCRIPTION: - This sample demonstrates how to use agents to execute tasks with Logic Apps. 
+ This sample demonstrates how to use agents with Logic Apps to execute the task of sending an email. PREREQUISITES: - Create a Logic App configured to send emails. The Logic App must include an HTTP request trigger that is + 1) Create a Logic App within the same resource group as your Azure AI Project in Azure Portal + 2) To configure your Logic App to send emails, you must include an HTTP request trigger that is configured to accept JSON with 'to', 'subject', and 'body'. The guide to creating a Logic App Workflow can be found here: - https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling + https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling USAGE: python sample_agents_logic_apps.py @@ -20,70 +21,71 @@ pip install azure-ai-projects azure-identity - Replace with a valid email address in the message content. - Set this environment variables with your own values: 1) PROJECT_CONNECTION_STRING - The project connection string, as found in the overview page of your Azure AI Foundry project. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) LOGIC_APP_URL - the URL of the Logic App Workflow URL to send emails, as found in your Azure Portal. + + Replace the following values in the sample with your own values: + 1) - The name of the Logic App you created. + 2) - The name of the trigger in the Logic App you created (the default name for HTTP + triggers in the Azure Portal is "When_a_HTTP_request_is_received"). + 3) - The email address of the recipient. 
""" -import os, sys - -# Get the parent directory -parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -# Add the parent directory to the system path -sys.path.append(parent_dir) + +import os +import requests from typing import Set -from user_functions import fetch_current_datetime + from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ToolSet, FunctionTool from azure.identity import DefaultAzureCredential -import requests, json - - -LOGIC_APP_URL = os.environ.get("LOGIC_APP_URL", "") - - -def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: - """ - Sends an email by triggering an Azure Logic App endpoint. - The Logic App must be configured to accept JSON with 'to', 'subject', and 'body'. - """ - if not LOGIC_APP_URL: - raise ValueError("Logic App URL is not set.") - - payload = { - "to": recipient, - "subject": subject, - "body": body - } - - response = requests.post(url=LOGIC_APP_URL, json=payload) - if response.ok: - return json.dumps({"result": "Email sent successfully."}) - else: - return json.dumps({"error": f"Error sending email request ({response.status_code}): {response.text}"}) - - + +# Example user function +from user_functions import fetch_current_datetime + +# Import AzureLogicAppTool and the function factory from user_logic_apps +from user_logic_apps import ( + AzureLogicAppTool, + create_send_email_function +) + +# Create the project client project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) - + +# Extract subscription and resource group from the project scope +subscription_id = project_client.scope["subscription_id"] +resource_group = project_client.scope["resource_group_name"] + +# Logic App details +logic_app_name = "" +trigger_name = "" + +# Create and initialize our AzureLogicAppTool instance +logic_app_tool = AzureLogicAppTool(subscription_id, resource_group) 
+logic_app_tool.register_logic_app(logic_app_name, trigger_name) +print(f"Registered logic app '{logic_app_name}' with trigger '{trigger_name}'.") + +# Create the specialized "send_email_via_logic_app" function for your agent tools +send_email_func = create_send_email_function(logic_app_tool, logic_app_name) + +# Prepare the function tools for the agent functions_to_use: Set = { fetch_current_datetime, - send_email_via_logic_app, + send_email_func, # This references the AzureLogicAppTool instance via closure } - + with project_client: - + # Create an agent functions = FunctionTool(functions=functions_to_use) toolset = ToolSet() toolset.add(functions) - + agent = project_client.agents.create_agent( model=os.environ["MODEL_DEPLOYMENT_NAME"], name="SendEmailAgent", @@ -92,26 +94,29 @@ def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: ) print(f"Created agent, ID: {agent.id}") - # Create thread for communication + # Create a thread for communication thread = project_client.agents.create_thread() print(f"Created thread, ID: {thread.id}") - # Create message to thread + # Create a message in the thread message = project_client.agents.create_message( thread_id=thread.id, role="user", - content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", + content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", ) print(f"Created message, ID: {message.id}") - # Create and process agent run in thread with tools - run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Run finished with status: {run.status}") + # Create and process an agent run in the thread + run = project_client.agents.create_and_process_run( + thread_id=thread.id, + assistant_id=agent.id + ) + print(f"Run finished with status: {run.status}") if run.status == "failed": print(f"Run failed: {run.last_error}") - # Delete the assistant when done + # Delete the agent when 
done project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py new file mode 100644 index 000000000000..c3ee94f3810b --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py @@ -0,0 +1,84 @@ +import json +import requests +from typing import Dict, Any, Callable + +from azure.identity import DefaultAzureCredential +from azure.mgmt.logic import LogicManagementClient + + +class AzureLogicAppTool: + """ + A service that manages multiple Logic Apps by retrieving and storing their callback URLs, + and then invoking them with an appropriate payload. + """ + + def __init__(self, subscription_id: str, resource_group: str, credential=None): + if credential is None: + credential = DefaultAzureCredential() + self.subscription_id = subscription_id + self.resource_group = resource_group + self.logic_client = LogicManagementClient(credential, subscription_id) + + self.callback_urls: Dict[str, str] = {} + + def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: + """ + Retrieves and stores a callback URL for a specific Logic App + trigger. + Raises a ValueError if the callback URL is missing. + """ + callback = self.logic_client.workflow_triggers.list_callback_url( + resource_group_name=self.resource_group, + workflow_name=logic_app_name, + trigger_name=trigger_name, + ) + + if callback.value is None: + raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") + + self.callback_urls[logic_app_name] = callback.value + + def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: + """ + Invokes the registered Logic App (by name) with the given JSON payload. + Returns a dictionary summarizing success/failure. 
+ """ + if logic_app_name not in self.callback_urls: + raise ValueError(f"Logic App '{logic_app_name}' has not been registered.") + + url = self.callback_urls[logic_app_name] + response = requests.post(url=url, json=payload) + + if response.ok: + return {"result": f"Successfully invoked {logic_app_name}."} + else: + return { + "error": ( + f"Error invoking {logic_app_name} " + f"({response.status_code}): {response.text}" + ) + } + + +def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: + """ + Returns a function that sends an email by invoking the specified Logic App in LogicAppService. + This keeps the LogicAppService instance out of global scope by capturing it in a closure. + """ + def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: + """ + Sends an email by invoking the specified Logic App with the given recipient, subject, and body. + + :param recipient: The email address of the recipient. + :param subject: The subject of the email. + :param body: The body of the email. + :return: A JSON string summarizing the result of the operation. 
+ """ + payload = { + "to": recipient, + "subject": subject, + "body": body, + } + result = service.invoke_logic_app(logic_app_name, payload) + return json.dumps(result) + + return send_email_via_logic_app From bd37d406bc16caf62f8ea88d564a0561b7135c5b Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Fri, 31 Jan 2025 15:34:21 -0900 Subject: [PATCH 10/16] Fixes to vectorstore/file patches (#39514) * add fixes to file/vs patches * fix trailing whitespace --- .../azure/ai/projects/operations/_patch.py | 113 +++++++++++++----- 1 file changed, 81 insertions(+), 32 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 4a9d5b2b8a7f..95a055bb4082 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -2834,7 +2834,7 @@ def create_vector_store_and_poll( @distributed_trace def create_vector_store_and_poll( self, - body: Union[JSON, IO[bytes], None] = None, + body: Union[JSON, IO[bytes]] = _Unset, *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, @@ -2878,29 +2878,41 @@ def create_vector_store_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - if body is not None: - vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs) - elif file_ids is not None or data_sources is not None or (name is not None and expires_after is not None): - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) if data_sources else None - vector_store = self.create_vector_store( - content_type=content_type, + if body is not _Unset: + if isinstance(body, dict): + vector_store = super().create_vector_store( + body=body, + content_type=content_type or "application/json", + **kwargs + ) + elif isinstance(body, io.IOBase): + vector_store = super().create_vector_store( + body=body, + 
content_type=content_type, + **kwargs + ) + else: + raise ValueError( + "Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes])." + ) + else: + store_configuration = None + if data_sources: + store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) + + vector_store = super().create_vector_store( file_ids=file_ids, store_configuration=store_configuration, name=name, expires_after=expires_after, chunking_strategy=chunking_strategy, metadata=metadata, - **kwargs, - ) - else: - raise ValueError( - "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " - "'file_ids', 'store_configuration', or 'name' and 'expires_after'." + **kwargs ) while vector_store.status == "in_progress": time.sleep(sleep_interval) - vector_store = self.get_vector_store(vector_store.id) + vector_store = super().get_vector_store(vector_store.id) return vector_store @@ -2997,11 +3009,12 @@ def create_vector_store_file_batch_and_poll( def create_vector_store_file_batch_and_poll( self, vector_store_id: str, - body: Union[JSON, IO[bytes], None] = None, + body: Union[JSON, IO[bytes]] = _Unset, *, file_ids: Optional[List[str]] = None, data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any, ) -> _models.VectorStoreFileBatch: @@ -3018,6 +3031,8 @@ def create_vector_store_file_batch_and_poll( :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword content_type: Body parameter content-type. Defaults to "application/json". + :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. 
:paramtype sleep_interval: float @@ -3026,7 +3041,26 @@ def create_vector_store_file_batch_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - if body is None: + if body is not _Unset: + if isinstance(body, dict): + vector_store_file_batch = super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file_batch = super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError( + "Invalid type for 'body'. Must be a dict (JSON) or file-like (IO[bytes])." + ) + else: vector_store_file_batch = super().create_vector_store_file_batch( vector_store_id=vector_store_id, file_ids=file_ids, @@ -3034,11 +3068,6 @@ def create_vector_store_file_batch_and_poll( chunking_strategy=chunking_strategy, **kwargs, ) - else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file_batch = super().create_vector_store_file_batch( - body=body, content_type=content_type, **kwargs - ) while vector_store_file_batch.status == "in_progress": time.sleep(sleep_interval) @@ -3146,7 +3175,7 @@ def create_vector_store_file_and_poll( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, **kwargs: Any, @@ -3160,8 +3189,8 @@ def create_vector_store_file_and_poll( :paramtype content_type: str :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. 
Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -3206,8 +3235,9 @@ def create_vector_store_file_and_poll( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, + content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, **kwargs: Any, @@ -3218,10 +3248,12 @@ def create_vector_store_file_and_poll( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. + :paramtype content_type: str :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -3232,17 +3264,34 @@ def create_vector_store_file_and_poll( :rtype: ~azure.ai.projects.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - if body is None: + + if body is not _Unset: + if isinstance(body, dict): + vector_store_file = super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file = super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError( + "Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes])." + ) + else: vector_store_file = super().create_vector_store_file( vector_store_id=vector_store_id, file_id=file_id, - data_sources=data_sources, + data_source=data_source, chunking_strategy=chunking_strategy, **kwargs, ) - else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file = super().create_vector_store_file(body=body, content_type=content_type, **kwargs) while vector_store_file.status == "in_progress": time.sleep(sleep_interval) From 67d15026290fd6b258821aa94adb9688ba5d6a1d Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Feb 2025 07:54:48 -0800 Subject: [PATCH 11/16] Add "None" as possible authentication type for connections. 
(#39517) --- .../azure/ai/projects/_model_base.py | 60 ++ .../azure/ai/projects/_serialization.py | 76 ++- .../ai/projects/aio/operations/_operations.py | 516 +++++++++--------- .../ai/projects/aio/operations/_patch.py | 184 ++++--- .../azure/ai/projects/models/_enums.py | 2 + .../azure/ai/projects/models/_models.py | 73 ++- .../ai/projects/operations/_operations.py | 516 +++++++++--------- .../azure/ai/projects/operations/_patch.py | 192 +++---- .../agents/sample_agents_logic_apps.py | 18 +- .../samples/agents/user_logic_apps.py | 12 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 11 files changed, 874 insertions(+), 777 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py index 7f73b97b23ef..3072ee252ed9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py @@ -373,15 +373,34 @@ def __ne__(self, other: typing.Any) -> bool: return not self.__eq__(other) def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ return self._data.keys() def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ return self._data.values() def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ return self._data.items() def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ try: return self[key] except KeyError: @@ -397,17 +416,38 @@ def pop(self, key: str, default: _T) -> _T: ... 
def pop(self, key: str, default: typing.Any) -> typing.Any: ... def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ if default is _UNSET: return self._data.pop(key) return self._data.pop(key, default) def popitem(self) -> typing.Tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ return self._data.popitem() def clear(self) -> None: + """ + Remove all items from D. + """ self._data.clear() def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ self._data.update(*args, **kwargs) @typing.overload @@ -417,6 +457,13 @@ def setdefault(self, key: str, default: None = None) -> None: ... def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. + :rtype: any + """ if default is _UNSET: return self._data.setdefault(key) return self._data.setdefault(key, default) @@ -910,6 +957,19 @@ def _failsafe_deserialize( return None +def _failsafe_deserialize_xml( + deserializer: typing.Any, + value: typing.Any, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, value) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + class _RestField: def __init__( self, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py index 670738f0789c..a066e16a64dd 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py @@ -48,9 +48,7 @@ IO, Mapping, Callable, - TypeVar, MutableMapping, - Type, List, ) @@ -61,13 +59,13 @@ import xml.etree.ElementTree as ET import isodate # type: ignore +from typing_extensions import Self from azure.core.exceptions import DeserializationError, SerializationError from azure.core.serialization import NULL as CoreNull _BOM = codecs.BOM_UTF8.decode(encoding="utf-8") -ModelType = TypeVar("ModelType", bound="Model") JSON = MutableMapping[str, Any] @@ -384,25 +382,25 @@ def _infer_class_models(cls): return client_models @classmethod - def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self: """Parse a str using the RestAPI syntax and return a model. :param str data: A str using RestAPI structure. JSON by default. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType + :raises DeserializationError: if something went wrong + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) return deserializer(cls.__name__, data, content_type=content_type) # type: ignore @classmethod def from_dict( - cls: Type[ModelType], + cls, data: Any, key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, content_type: Optional[str] = None, - ) -> ModelType: + ) -> Self: """Parse a dict using given key extractor return a model. 
By default consider key @@ -414,7 +412,7 @@ def from_dict( :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model :raises: DeserializationError if something went wrong - :rtype: ModelType + :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) deserializer.key_extractors = ( # type: ignore @@ -560,7 +558,7 @@ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, to :param object target_obj: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, dict - :raises: SerializationError if serialization fails. + :raises SerializationError: if serialization fails. :returns: The serialized data. """ key_transformer = kwargs.get("key_transformer", self.key_transformer) @@ -670,8 +668,8 @@ def body(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None + :raises SerializationError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized request body """ @@ -715,8 +713,8 @@ def url(self, name, data, data_type, **kwargs): :param str data_type: The type to be serialized from. :rtype: str :returns: The serialized URL path - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None """ try: output = self.serialize_data(data, data_type, **kwargs) @@ -739,8 +737,8 @@ def query(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. 
+ :raises ValueError: if data is None :returns: The serialized query parameter """ try: @@ -769,8 +767,8 @@ def header(self, name, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None + :raises TypeError: if serialization fails. + :raises ValueError: if data is None :returns: The serialized header """ try: @@ -789,9 +787,9 @@ def serialize_data(self, data, data_type, **kwargs): :param object data: The data to be serialized. :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. + :raises AttributeError: if required data is None. + :raises ValueError: if data is None + :raises SerializationError: if serialization fails. :returns: The serialized data. :rtype: str, int, float, bool, dict, list """ @@ -1126,7 +1124,7 @@ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: TypeError if format invalid. + :raises TypeError: if format invalid. :return: serialized rfc """ try: @@ -1152,7 +1150,7 @@ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. :rtype: str - :raises: SerializationError if format invalid. + :raises SerializationError: if format invalid. :return: serialized iso """ if isinstance(attr, str): @@ -1185,7 +1183,7 @@ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument :param Datetime attr: Object to be serialized. 
:rtype: int - :raises: SerializationError if format invalid + :raises SerializationError: if format invalid :return: serialied unix """ if isinstance(attr, int): @@ -1422,7 +1420,7 @@ def __call__(self, target_obj, response_data, content_type=None): :param str target_obj: Target data type to deserialize to. :param requests.Response response_data: REST response object. :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1436,7 +1434,7 @@ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return :param str target_obj: Target data type to deserialize to. :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1651,7 +1649,7 @@ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return- :param str data: The response string to be deserialized. :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. + :raises DeserializationError: if deserialization fails. :return: Deserialized object. :rtype: object """ @@ -1733,7 +1731,7 @@ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return :param dict attr: Dictionary to be deserialized. :return: Deserialized object. :rtype: dict - :raises: TypeError if non-builtin datatype encountered. + :raises TypeError: if non-builtin datatype encountered. """ if attr is None: return None @@ -1779,7 +1777,7 @@ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return :param str data_type: deserialization data type. :return: Deserialized basic type. :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. 
+ :raises TypeError: if string format is not valid. """ # If we're here, data is supposed to be a basic type. # If it's still an XML node, take the text @@ -1870,7 +1868,7 @@ def deserialize_bytearray(attr): :param str attr: response string to be deserialized. :return: Deserialized bytearray :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1883,7 +1881,7 @@ def deserialize_base64(attr): :param str attr: response string to be deserialized. :return: Deserialized base64 string :rtype: bytearray - :raises: TypeError if string format invalid. + :raises TypeError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1898,7 +1896,7 @@ def deserialize_decimal(attr): :param str attr: response string to be deserialized. :return: Deserialized decimal - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. :rtype: decimal """ if isinstance(attr, ET.Element): @@ -1916,7 +1914,7 @@ def deserialize_long(attr): :param str attr: response string to be deserialized. :return: Deserialized int :rtype: long or int - :raises: ValueError if string format invalid. + :raises ValueError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1929,7 +1927,7 @@ def deserialize_duration(attr): :param str attr: response string to be deserialized. :return: Deserialized duration :rtype: TimeDelta - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1947,7 +1945,7 @@ def deserialize_date(attr): :param str attr: response string to be deserialized. :return: Deserialized date :rtype: Date - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. 
""" if isinstance(attr, ET.Element): attr = attr.text @@ -1963,7 +1961,7 @@ def deserialize_time(attr): :param str attr: response string to be deserialized. :return: Deserialized time :rtype: datetime.time - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -1978,7 +1976,7 @@ def deserialize_rfc(attr): :param str attr: response string to be deserialized. :return: Deserialized RFC datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2001,7 +1999,7 @@ def deserialize_iso(attr): :param str attr: response string to be deserialized. :return: Deserialized ISO datetime :rtype: Datetime - :raises: DeserializationError if string format invalid. + :raises DeserializationError: if string format invalid. """ if isinstance(attr, ET.Element): attr = attr.text @@ -2039,7 +2037,7 @@ def deserialize_unix(attr): :param int attr: Object to be serialized. 
:return: Deserialized datetime :rtype: Datetime - :raises: DeserializationError if format invalid + :raises DeserializationError: if format invalid """ if isinstance(attr, ET.Element): attr = int(attr.text) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 0b0b50eeac06..1e990007a1f0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -121,20 +121,6 @@ def __init__(self, *args, **kwargs) -> None: self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_agent( self, @@ -199,6 +185,20 @@ async def create_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_agent( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -513,24 +513,6 @@ async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: return deserialized # type: ignore - @overload - async def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def update_agent( self, @@ -600,6 +582,24 @@ async def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def update_agent( self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -829,22 +829,6 @@ async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentD return deserialized # type: ignore - @overload - async def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_thread( self, @@ -879,6 +863,22 @@ async def create_thread( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_thread( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -1059,24 +1059,6 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread return deserialized # type: ignore - @overload - async def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def update_thread( self, @@ -1110,6 +1092,24 @@ async def update_thread( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def update_thread( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -1292,24 +1292,6 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe return deserialized # type: ignore - @overload - async def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_message( self, @@ -1355,6 +1337,24 @@ async def create_message( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_message( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -1657,7 +1657,13 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ @overload async def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any ) -> _models.ThreadMessage: """Modifies an existing message on an existing thread. @@ -1665,11 +1671,14 @@ async def update_message( :type thread_id: str :param message_id: Identifier of the message. Required. :type message_id: str - :param body: Required. - :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: @@ -1677,13 +1686,7 @@ async def update_message( @overload async def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadMessage: """Modifies an existing message on an existing thread. @@ -1691,14 +1694,11 @@ async def update_message( :type thread_id: str :param message_id: Identifier of the message. Required. :type message_id: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: @@ -1820,35 +1820,6 @@ async def update_message( return deserialized # type: ignore - @overload - async def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. 
- :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_run( self, @@ -1962,6 +1933,35 @@ async def create_run( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_run( self, @@ -2355,7 +2355,13 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T @overload async def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any ) -> _models.ThreadRun: """Modifies an existing thread run. @@ -2363,11 +2369,14 @@ async def update_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :param body: Required. - :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2375,13 +2384,7 @@ async def update_run( @overload async def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Modifies an existing thread run. @@ -2389,14 +2392,11 @@ async def update_run( :type thread_id: str :param run_id: Identifier of the run. Required. 
:type run_id: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2520,7 +2520,14 @@ async def update_run( @overload async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2530,11 +2537,14 @@ async def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :param body: Required. - :type body: JSON + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. 
Default value is None. + :paramtype stream_parameter: bool :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2542,14 +2552,7 @@ async def submit_tool_outputs_to_run( @overload async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2559,14 +2562,11 @@ async def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2763,22 +2763,6 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model return deserialized # type: ignore - @overload - async def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_thread_and_run( self, @@ -2882,6 +2866,22 @@ async def create_thread_and_run( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -3345,17 +3345,6 @@ async def list_files( return deserialized # type: ignore - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def upload_file( self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any @@ -3376,6 +3365,17 @@ async def upload_file( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + @distributed_trace_async async def upload_file( self, @@ -3757,22 +3757,6 @@ async def list_vector_stores( return deserialized # type: ignore - @overload - async def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_vector_store( self, @@ -3814,6 +3798,22 @@ async def create_vector_store( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_vector_store( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -4008,24 +4008,6 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models return deserialized # type: ignore - @overload - async def modify_vector_store( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """The ID of the vector store to modify. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def modify_vector_store( self, @@ -4058,6 +4040,24 @@ async def modify_vector_store( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def modify_vector_store( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def modify_vector_store( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -4340,24 +4340,6 @@ async def list_vector_store_files( return deserialized # type: ignore - @overload - async def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_vector_store_file( self, @@ -4388,6 +4370,24 @@ async def create_vector_store_file( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_vector_store_file( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -4643,24 +4643,6 @@ async def delete_vector_store_file( return deserialized # type: ignore - @overload - async def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_vector_store_file_batch( self, @@ -4691,6 +4673,24 @@ async def create_vector_store_file_batch( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_vector_store_file_batch( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -5283,11 +5283,11 @@ async def _get_connection(self, connection_name: str, **kwargs: Any) -> _models. @overload async def _get_connection_with_secrets( - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any ) -> _models._models.GetConnectionResponse: ... @overload async def _get_connection_with_secrets( - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.GetConnectionResponse: ... 
@overload async def _get_connection_with_secrets( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 3bc909516929..fa48d47767fb 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -484,7 +484,11 @@ async def get_default( ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if a connection with the given name was not found. + exception if there are no connections of the given type. + + .. note:: + `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not + currently work. It does work with `include_credentials=False`. :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType @@ -521,6 +525,8 @@ async def get( populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. + .. note:: This method is not supported for Azure Blob Storage connections. + :keyword connection_name: Connection Name. Required. :type connection_name: str :keyword include_credentials: Whether to populate the connection properties with authentication credentials. @@ -657,20 +663,6 @@ def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._toolset: Dict[str, _models.AsyncToolSet] = {} - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
- Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload async def create_agent( # pylint: disable=arguments-differ @@ -794,6 +786,20 @@ async def create_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_agent( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -890,24 +896,6 @@ async def create_agent( self._toolset[new_agent.id] = toolset return new_agent - @overload - async def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload async def update_agent( # pylint: disable=arguments-differ @@ -1040,6 +1028,24 @@ async def update_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def update_agent( self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -1180,35 +1186,6 @@ def _validate_tools_and_tool_resources( "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" ) - @overload - async def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload async def create_run( # pylint: disable=arguments-differ @@ -1316,6 +1293,35 @@ async def create_run( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_run( self, @@ -2090,9 +2096,16 @@ async def create_stream( # pyright: ignore[reportInconsistentOverload] return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + # pylint: disable=arguments-differ @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2102,8 +2115,8 @@ async def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Required. :type run_id: str - :param body: Required. - :type body: JSON + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str @@ -2112,16 +2125,9 @@ async def submit_tool_outputs_to_run( :raises ~azure.core.exceptions.HttpResponseError: """ - # pylint: disable=arguments-differ @overload - async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - **kwargs: Any, + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2131,8 +2137,8 @@ async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :type thread_id: str :param run_id: Required. :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -2341,12 +2347,18 @@ async def _handle_submit_tool_outputs( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler ) + # pylint: disable=arguments-differ @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + async def upload_file( # pylint: disable=arguments-differ + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param body: Required. - :type body: JSON + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. 
+ :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.projects.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: @@ -2371,18 +2383,12 @@ async def upload_file( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ - # pylint: disable=arguments-differ @overload - async def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :param body: Required. + :type body: JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.projects.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index dafd1a31c8d1..6ed9eb9b3162 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -134,6 +134,8 @@ class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Entra ID authentication (formerly known as AAD)""" SAS = "SAS" """Shared Access Signature (SAS) authentication""" + NONE = "None" + """No authentication""" class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 6e6edb150dcf..1d4d35c02c12 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -233,7 +233,7 @@ class AgentsNamedToolChoice(_model_base.Model): """ type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() - """the type of tool. If type is ``function``\ , the function name must be set. Required. Known + """the type of tool. If type is ``function``, the function name must be set. Required. Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", \"fabric_aiskill\", \"sharepoint_grounding\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() @@ -447,7 +447,7 @@ class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): :vartype resource_id: str :ivar query: Query to fetch the data. Required. :vartype query: str - :ivar service_name: Service name. Required. + :ivar service_name: Service name. 
:vartype service_name: str :ivar connection_string: Connection String to connect to ApplicationInsights. :vartype connection_string: str @@ -459,8 +459,8 @@ class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" query: str = rest_field() """Query to fetch the data. Required.""" - service_name: str = rest_field(name="serviceName") - """Service name. Required.""" + service_name: Optional[str] = rest_field(name="serviceName") + """Service name.""" connection_string: Optional[str] = rest_field(name="connectionString") """Connection String to connect to ApplicationInsights.""" @@ -470,7 +470,7 @@ def __init__( *, resource_id: str, query: str, - service_name: str, + service_name: Optional[str] = None, connection_string: Optional[str] = None, ) -> None: ... @@ -1744,11 +1744,11 @@ class InternalConnectionProperties(_model_base.Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: InternalConnectionPropertiesAADAuth, InternalConnectionPropertiesApiKeyAuth, - InternalConnectionPropertiesSASAuth + InternalConnectionPropertiesNoAuth, InternalConnectionPropertiesSASAuth :ivar auth_type: Authentication type of the connection target. Required. Known values are: - "ApiKey", "AAD", and "SAS". + "ApiKey", "AAD", "SAS", and "None". :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". @@ -1760,7 +1760,7 @@ class InternalConnectionProperties(_model_base.Model): __mapping__: Dict[str, _model_base.Model] = {} auth_type: str = rest_discriminator(name="authType") """Authentication type of the connection target. Required. 
Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" + \"SAS\", and \"None\".""" category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" @@ -1865,6 +1865,41 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, auth_type=AuthenticationType.API_KEY, **kwargs) +class InternalConnectionPropertiesNoAuth(InternalConnectionProperties, discriminator="None"): + """Connection properties for connections with no authentication. + + + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + :ivar auth_type: Authentication type of the connection target. Required. No authentication + :vartype auth_type: str or ~azure.ai.projects.models.NONE + """ + + auth_type: Literal[AuthenticationType.NONE] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. No authentication""" + + @overload + def __init__( + self, + *, + category: Union[str, "_models.ConnectionType"], + target: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, auth_type=AuthenticationType.NONE, **kwargs) + + class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): """Connection properties for connections with SAS authentication. 
@@ -4503,7 +4538,7 @@ class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: """The input into the Code Interpreter tool call.""" outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented + items, including text (``logs``) or images (``image``). Each of these are represented by a different object type.""" @@ -5778,9 +5813,9 @@ class ThreadMessageOptions(_model_base.Model): """The role of the entity that is creating the message. Allowed values include: - * ``user``\ : Indicates the message is sent by an actual user and should be used in most + * ``user``: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert + * ``assistant``: Indicates the message is generated by the agent. Use this value to insert messages from the agent into the conversation. Required. Known values are: \"user\" and \"assistant\".""" content: str = rest_field() @@ -5941,7 +5976,7 @@ class ThreadRun(_model_base.Model): """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.""" usage: "_models.RunCompletionUsage" = rest_field() """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" + terminal state (i.e. ``in_progress``, ``queued``, etc.). Required.""" temperature: Optional[float] = rest_field() """The sampling temperature used for this run. 
If not set, defaults to 1.""" top_p: Optional[float] = rest_field() @@ -6178,9 +6213,9 @@ class TruncationObject(_model_base.Model): type: Union[str, "_models.TruncationStrategy"] = rest_field() """The truncation strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``\ , the thread will + ``last_messages``, the thread will be truncated to the ``lastMessages`` count most recent messages in the thread. When set to - ``auto``\ , messages in the middle of the thread + ``auto``, messages in the middle of the thread will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known values are: \"auto\" and \"last_messages\".""" last_messages: Optional[int] = rest_field() @@ -6362,7 +6397,7 @@ class VectorStore(_model_base.Model): file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. Required.""" status: Union[str, "_models.VectorStoreStatus"] = rest_field() - """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or + """The status of the vector store, which can be either ``expired``, ``in_progress``, or ``completed``. A status of ``completed`` indicates that the vector store is ready for use. Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() @@ -6753,8 +6788,8 @@ class VectorStoreFile(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. Required.""" status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , - ``cancelled``\ , or ``failed``. 
The status ``completed`` indicates that the vector store file + """The status of the vector store file, which can be either ``in_progress``, ``completed``, + ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector store file is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and \"cancelled\".""" last_error: "_models.VectorStoreFileError" = rest_field() @@ -6823,8 +6858,8 @@ class VectorStoreFileBatch(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. Required.""" status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``\ , - ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + """The status of the vector store files batch, which can be either ``in_progress``, + ``completed``, ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", \"completed\", \"cancelled\", and \"failed\".""" file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. Required.""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 4126d8a21ef8..ef27ce1eca4c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1532,20 +1532,6 @@ def __init__(self, *args, **kwargs): self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. 
- - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_agent( self, @@ -1610,6 +1596,20 @@ def create_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -1922,24 +1922,6 @@ def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: return deserialized # type: ignore - @overload - def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def update_agent( self, @@ -2009,6 +1991,24 @@ def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def update_agent( self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -2238,22 +2238,6 @@ def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletio return deserialized # type: ignore - @overload - def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_thread( self, @@ -2288,6 +2272,22 @@ def create_thread( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_thread( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -2468,24 +2468,6 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: return deserialized # type: ignore - @overload - def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def update_thread( self, @@ -2519,6 +2501,24 @@ def update_thread( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def update_thread( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -2701,24 +2701,6 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion return deserialized # type: ignore - @overload - def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_message( self, @@ -2764,6 +2746,24 @@ def create_message( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_message( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -3066,7 +3066,13 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models @overload def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any ) -> _models.ThreadMessage: """Modifies an existing message on an existing thread. @@ -3074,11 +3080,14 @@ def update_message( :type thread_id: str :param message_id: Identifier of the message. Required. :type message_id: str - :param body: Required. - :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". 
:paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: @@ -3086,13 +3095,7 @@ def update_message( @overload def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadMessage: """Modifies an existing message on an existing thread. @@ -3100,14 +3103,11 @@ def update_message( :type thread_id: str :param message_id: Identifier of the message. Required. :type message_id: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: @@ -3229,35 +3229,6 @@ def update_message( return deserialized # type: ignore - @overload - def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. - :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_run( self, @@ -3371,6 +3342,35 @@ def create_run( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. 
+ Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_run( self, @@ -3764,7 +3764,13 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR @overload def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any ) -> _models.ThreadRun: """Modifies an existing thread run. @@ -3772,11 +3778,14 @@ def update_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :param body: Required. - :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -3784,13 +3793,7 @@ def update_run( @overload def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Modifies an existing thread run. @@ -3798,14 +3801,11 @@ def update_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -3929,7 +3929,14 @@ def update_run( @overload def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. 
Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -3939,11 +3946,14 @@ def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :param body: Required. - :type body: JSON + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -3951,14 +3961,7 @@ def submit_tool_outputs_to_run( @overload def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -3968,14 +3971,11 @@ def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Identifier of the run. Required. :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -4172,22 +4172,6 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre return deserialized # type: ignore - @overload - def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_thread_and_run( self, @@ -4291,6 +4275,22 @@ def create_thread_and_run( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_thread_and_run( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -4754,17 +4754,6 @@ def list_files( return deserialized # type: ignore - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def upload_file( self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any @@ -4785,6 +4774,17 @@ def upload_file( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + @distributed_trace def upload_file( self, @@ -5166,22 +5166,6 @@ def list_vector_stores( return deserialized # type: ignore - @overload - def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_vector_store( self, @@ -5223,6 +5207,22 @@ def create_vector_store( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_vector_store( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -5417,24 +5417,6 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto return deserialized # type: ignore - @overload - def modify_vector_store( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """The ID of the vector store to modify. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def modify_vector_store( self, @@ -5467,6 +5449,24 @@ def modify_vector_store( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def modify_vector_store( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """The ID of the vector store to modify. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def modify_vector_store( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -5749,24 +5749,6 @@ def list_vector_store_files( return deserialized # type: ignore - @overload - def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_vector_store_file( self, @@ -5797,6 +5779,24 @@ def create_vector_store_file( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_vector_store_file( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -6052,24 +6052,6 @@ def delete_vector_store_file( return deserialized # type: ignore - @overload - def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload def create_vector_store_file_batch( self, @@ -6100,6 +6082,24 @@ def create_vector_store_file_batch( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_vector_store_file_batch( self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -6692,11 +6692,11 @@ def _get_connection(self, connection_name: str, **kwargs: Any) -> _models._model @overload def _get_connection_with_secrets( - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any ) -> _models._models.GetConnectionResponse: ... @overload def _get_connection_with_secrets( - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.GetConnectionResponse: ... 
@overload def _get_connection_with_secrets( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 95a055bb4082..d23a02f7bbfe 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -480,7 +480,11 @@ def get_default( ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError - exception if a connection with the given name was not found. + exception if there are no connections of the given type. + + .. note:: + `get_default(connection_type=ConnectionType.AZURE_BLOB_STORAGE, include_credentials=True)` does not + currently work. It does work with `include_credentials=False`. :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType @@ -515,6 +519,8 @@ def get(self, *, connection_name: str, include_credentials: bool = False, **kwar populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. + .. note:: This method is not supported for Azure Blob Storage connections. + :keyword connection_name: Connection Name. Required. :type connection_name: str :keyword include_credentials: Whether to populate the connection properties with authentication credentials. @@ -837,20 +843,6 @@ def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._toolset: Dict[str, _models.ToolSet] = {} - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload def create_agent( # pylint: disable=arguments-differ @@ -974,6 +966,20 @@ def create_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -1071,24 +1077,6 @@ def create_agent( self._toolset[new_agent.id] = toolset return new_agent - @overload - def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload def update_agent( # pylint: disable=arguments-differ @@ -1221,6 +1209,24 @@ def update_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def update_agent( self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -1361,35 +1367,6 @@ def _validate_tools_and_tool_resources( "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" ) - @overload - def create_run( - self, - thread_id: str, - body: JSON, - *, - include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword include: A list of additional fields to include in the response. - Currently the only supported value is - ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result - content. Default value is None. 
- :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - # pylint: disable=arguments-differ @overload def create_run( # pylint: disable=arguments-differ @@ -1497,6 +1474,35 @@ def create_run( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_run( + self, + thread_id: str, + body: JSON, + *, + include: Optional[List[Union[str, _models.RunAdditionalFieldList]]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword include: A list of additional fields to include in the response. + Currently the only supported value is + ``step_details.tool_calls[*].file_search.results[*].content`` to fetch the file search result + content. Default value is None. + :paramtype include: list[str or ~azure.ai.projects.models.RunAdditionalFieldList] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_run( self, @@ -2271,9 +2277,17 @@ def create_stream( # pyright: ignore[reportInconsistentOverload] event_handler = cast(_models.BaseAgentEventHandlerT, _models.AgentEventHandler()) return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + # pylint: disable=arguments-differ @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + def submit_tool_outputs_to_run( # pylint: disable=arguments-differ + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2283,27 +2297,22 @@ def submit_tool_outputs_to_run( :type thread_id: str :param run_id: Required. :type run_id: str - :param body: Required. - :type body: JSON + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - # pylint: disable=arguments-differ @overload - def submit_tool_outputs_to_run( # pylint: disable=arguments-differ - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of @@ -2313,14 +2322,11 @@ def submit_tool_outputs_to_run( # pylint: disable=arguments-differ :type thread_id: str :param run_id: Required. :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :param body: Required. + :type body: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2531,12 +2537,18 @@ def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _mo event_handler=event_handler, ) + # pylint: disable=arguments-differ @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + def upload_file( # pylint: disable=arguments-differ + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param body: Required. - :type body: JSON + :keyword file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.projects.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: @@ -2561,18 +2573,12 @@ def upload_file( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ - # pylint: disable=arguments-differ @overload - def upload_file( # pylint: disable=arguments-differ - self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :keyword file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :param body: Required. + :type body: JSON :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.projects.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py index ccd78d3c434b..97a45a23e58f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_logic_apps.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ - + """ DESCRIPTION: This sample demonstrates how to use agents with Logic Apps to execute the task of sending an email. @@ -33,7 +33,7 @@ triggers in the Azure Portal is "When_a_HTTP_request_is_received"). 3) - The email address of the recipient. """ - + import os import requests @@ -47,10 +47,7 @@ from user_functions import fetch_current_datetime # Import AzureLogicAppTool and the function factory from user_logic_apps -from user_logic_apps import ( - AzureLogicAppTool, - create_send_email_function -) +from user_logic_apps import AzureLogicAppTool, create_send_email_function # Create the project client project_client = AIProjectClient.from_connection_string( @@ -102,15 +99,12 @@ message = project_client.agents.create_message( thread_id=thread.id, role="user", - content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", + content="Hello, please send an email to with the date and time in '%Y-%m-%d %H:%M:%S' format.", ) print(f"Created message, ID: {message.id}") # Create and process an agent run in the thread - run = project_client.agents.create_and_process_run( - thread_id=thread.id, - assistant_id=agent.id - ) + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Run finished with status: {run.status}") if run.status == "failed": @@ -122,4 +116,4 @@ # Fetch and log all messages 
messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") \ No newline at end of file + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py b/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py index c3ee94f3810b..979fd5eca143 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py +++ b/sdk/ai/azure-ai-projects/samples/agents/user_logic_apps.py @@ -18,7 +18,7 @@ def __init__(self, subscription_id: str, resource_group: str, credential=None): self.subscription_id = subscription_id self.resource_group = resource_group self.logic_client = LogicManagementClient(credential, subscription_id) - + self.callback_urls: Dict[str, str] = {} def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: @@ -34,7 +34,7 @@ def register_logic_app(self, logic_app_name: str, trigger_name: str) -> None: if callback.value is None: raise ValueError(f"No callback URL returned for Logic App '{logic_app_name}'.") - + self.callback_urls[logic_app_name] = callback.value def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict[str, Any]: @@ -51,12 +51,7 @@ def invoke_logic_app(self, logic_app_name: str, payload: Dict[str, Any]) -> Dict if response.ok: return {"result": f"Successfully invoked {logic_app_name}."} else: - return { - "error": ( - f"Error invoking {logic_app_name} " - f"({response.status_code}): {response.text}" - ) - } + return {"error": (f"Error invoking {logic_app_name} " f"({response.status_code}): {response.text}")} def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) -> Callable[[str, str, str], str]: @@ -64,6 +59,7 @@ def create_send_email_function(service: AzureLogicAppTool, logic_app_name: str) Returns a function that sends an email by invoking the specified Logic App in LogicAppService. This keeps the LogicAppService instance out of global scope by capturing it in a closure. 
""" + def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str: """ Sends an email by invoking the specified Logic App with the given recipient, subject, and body. diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index f05c76ce3b88..2e8d2adc8a0a 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 84f3f74acf7fd611b4cfc8235792264f4e832300 +commit: 6e507701253408679175e95176995c437f8e00d4 repo: Azure/azure-rest-api-specs additionalDirectories: From 6e07826ba7aa1a50d6b4971be783312e9a44dec9 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 3 Feb 2025 16:08:20 -0800 Subject: [PATCH 12/16] First --- sdk/ai/azure-ai-projects/README.md | 2 +- .../sample_inference_client_from_connection_async.py | 2 +- .../sample_inference_client_from_connection.py | 2 +- ...at_completions_with_azure_ai_inference_client_async.py | 2 +- ...ple_chat_completions_with_azure_ai_inference_client.py | 2 +- ...azure_ai_inference_client_and_azure_monitor_tracing.py | 2 +- ..._with_azure_ai_inference_client_and_console_tracing.py | 2 +- .../tests/connections/connection_test_base.py | 4 ++-- .../azure-ai-projects/tests/inference/test_inference.py | 8 ++++---- .../tests/inference/test_inference_async.py | 8 ++++---- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index ba5c14afcc05..df1eea26ffa6 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -206,7 +206,7 @@ inference_client = project_client.inference.get_chat_completions_client() response = inference_client.complete( model="gpt-4o", # Model deployment name - messages=[UserMessage(content="How many feet are in a mile?")] + messages=[UserMessage("How many feet are in a mile?")] ) 
print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py index 20c9d5edb311..9adc005c135b 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_inference_client_from_connection_async.py @@ -119,7 +119,7 @@ async def sample_inference_client_from_connection() -> None: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") inference_response = await inference_client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) await inference_client.close() print(inference_response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py b/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py index 3b8dafd613b4..e7f188b8a701 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_inference_client_from_connection.py @@ -113,7 +113,7 @@ raise ValueError(f"Authentication type {connection.authentication_type} not supported.") inference_response = inference_client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) inference_client.close() print(inference_response.choices[0].message.content) diff --git 
a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index e3093ea0ff35..f10bd55820e7 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -42,7 +42,7 @@ async def sample_get_chat_completions_client_async(): async with await project_client.inference.get_chat_completions_client() as client: response = await client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py index 713a1e34eec7..b8e976b5458a 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py @@ -37,7 +37,7 @@ with project_client.inference.get_chat_completions_client() as client: response = client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py 
b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py index e7431eb6d626..ecc7f75b96c0 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py @@ -53,7 +53,7 @@ with project_client.inference.get_chat_completions_client() as client: response = client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index 2f902b355e55..74064ef8acc2 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -51,7 +51,7 @@ with project_client.inference.get_chat_completions_client() as client: response = client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index 7096f87493be..657c2c39eb00 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ 
b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -180,7 +180,7 @@ def validate_inference( raise ValueError(f"Authentication type {connection.authentication_type} not supported.") inference_response = inference_client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print("\nChatCompletionsClient response:") pprint.pprint(inference_response) @@ -264,7 +264,7 @@ async def validate_async_inference( raise ValueError(f"Authentication type {connection.authentication_type} not supported.") inference_response = await inference_client.complete( - model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + model=model_deployment_name, messages=[UserMessage("How many feet are in a mile?")] ) print("\nChatCompletionsClient response:") pprint.pprint(inference_response) diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 3860c7161917..4ad7c97fd203 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -105,8 +105,8 @@ def test_inference_get_chat_completions_client_key_auth(self, **kwargs): response = chat_completions_client.complete( model=model, messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), + SystemMessage("You are a helpful assistant."), + UserMessage("How many feet are in a mile?"), ], raw_request_hook=self.request_callback, ) @@ -128,8 +128,8 @@ def test_inference_get_chat_completions_client_entra_id_auth(self, **kwargs): response = chat_completions_client.complete( model=model, messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), + SystemMessage("You are a helpful 
assistant."), + UserMessage("How many feet are in a mile?"), ], raw_request_hook=self.request_callback, ) diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 11dec67b0896..c22f78520687 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -108,8 +108,8 @@ async def test_inference_get_chat_completions_client_key_auth_async(self, **kwar response = await chat_completions_client.complete( model=model, messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), + SystemMessage("You are a helpful assistant."), + UserMessage("How many feet are in a mile?"), ], raw_request_hook=self.request_callback, ) @@ -131,8 +131,8 @@ async def test_inference_get_chat_completions_client_entra_id_auth_async(self, * response = await chat_completions_client.complete( model=model, messages=[ - SystemMessage(content="You are a helpful assistant."), - UserMessage(content="How many feet are in a mile?"), + SystemMessage("You are a helpful assistant."), + UserMessage("How many feet are in a mile?"), ], raw_request_hook=self.request_callback, ) From 3eb0eb3cdebae1976fa49539dd308b5c1d5eec8f Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Tue, 4 Feb 2025 20:11:33 -0900 Subject: [PATCH 13/16] Jhakulin/b6 updates (#39548) * update openapi sample and tool * update sync and aio methods for create vector store --- .../ai/projects/aio/operations/_patch.py | 125 ++++++++++++------ .../azure/ai/projects/models/_patch.py | 77 ++++++++++- .../azure/ai/projects/operations/_patch.py | 13 +- .../samples/agents/countries.json | 46 +++++++ .../samples/agents/sample_agents_openapi.py | 36 ++++- 5 files changed, 242 insertions(+), 55 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/countries.json diff --git 
a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index fa48d47767fb..bdf51340722a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -2338,7 +2338,7 @@ async def _handle_submit_tool_outputs( if toolset: tool_outputs = await toolset.execute_tool_calls(tool_calls) else: - logger.warning("Toolset is not available in the client.") + logger.debug("Toolset is not available in the client.") return logger.info("Tool outputs: %s", tool_outputs) @@ -2453,10 +2453,10 @@ async def upload_file( raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") @overload - async def upload_file_and_poll(self, *, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + async def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :keyword body: Required. + :param body: Required. :type body: JSON :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. @@ -2650,7 +2650,7 @@ async def create_vector_store_and_poll( @distributed_trace_async async def create_vector_store_and_poll( self, - body: Union[JSON, IO[bytes], None] = None, + body: Union[JSON, IO[bytes]] = _Unset, *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, @@ -2666,7 +2666,7 @@ async def create_vector_store_and_poll( :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like @@ -2694,29 +2694,41 @@ async def create_vector_store_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - if body is not None: - vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs) - elif file_ids is not None or data_sources is not None or (name is not None and expires_after is not None): - store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) if data_sources else None - vector_store = await self.create_vector_store( - content_type=content_type, + if body is not _Unset: + if isinstance(body, dict): + vector_store = await super().create_vector_store( + body=body, + content_type=content_type or "application/json", + **kwargs + ) + elif isinstance(body, io.IOBase): + vector_store = await super().create_vector_store( + body=body, + content_type=content_type, + **kwargs + ) + else: + raise ValueError( + "Invalid 'body' type: must be a dictionary (JSON) or a file-like object (IO[bytes])." + ) + else: + store_configuration = None + if data_sources: + store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) + + vector_store = await super().create_vector_store( file_ids=file_ids, - name=name, store_configuration=store_configuration, + name=name, expires_after=expires_after, chunking_strategy=chunking_strategy, metadata=metadata, - **kwargs, - ) - else: - raise ValueError( - "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " - "'file_ids', 'store_configuration', or 'name' and 'expires_after'." 
+ **kwargs ) while vector_store.status == "in_progress": time.sleep(sleep_interval) - vector_store = await self.get_vector_store(vector_store.id) + vector_store = await super().get_vector_store(vector_store.id) return vector_store @@ -2752,7 +2764,7 @@ async def create_vector_store_file_batch_and_poll( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: Optional[List[str]] = None, data_sources: Optional[List[_models.VectorStoreDataSource]] = None, content_type: str = "application/json", chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, @@ -2812,11 +2824,12 @@ async def create_vector_store_file_batch_and_poll( async def create_vector_store_file_batch_and_poll( self, vector_store_id: str, - body: Union[JSON, IO[bytes], None] = None, + body: Union[JSON, IO[bytes]] = _Unset, *, file_ids: Optional[List[str]] = None, data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any, ) -> _models.VectorStoreFileBatch: @@ -2833,6 +2846,8 @@ async def create_vector_store_file_batch_and_poll( :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword content_type: Body parameter content-type. Defaults to "application/json". + :paramtype content_type: str :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value is 1. 
:paramtype sleep_interval: float @@ -2841,7 +2856,26 @@ async def create_vector_store_file_batch_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - if body is None: + if body is not _Unset: + if isinstance(body, dict): + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError( + "Invalid type for 'body'. Must be a dict (JSON) or file-like (IO[bytes])." + ) + else: vector_store_file_batch = await super().create_vector_store_file_batch( vector_store_id=vector_store_id, file_ids=file_ids, @@ -2849,11 +2883,6 @@ async def create_vector_store_file_batch_and_poll( chunking_strategy=chunking_strategy, **kwargs, ) - else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file_batch = await super().create_vector_store_file_batch( - body=body, content_type=content_type, **kwargs - ) while vector_store_file_batch.status == "in_progress": time.sleep(sleep_interval) @@ -2897,7 +2926,7 @@ async def create_vector_store_file_and_poll( *, content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, **kwargs: Any, @@ -2911,8 +2940,8 @@ async def create_vector_store_file_and_poll( :paramtype content_type: str :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. 
- :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -2957,8 +2986,9 @@ async def create_vector_store_file_and_poll( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, + content_type: str = "application/json", file_id: Optional[str] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + data_source: Optional[_models.VectorStoreDataSource] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, **kwargs: Any, @@ -2969,10 +2999,12 @@ async def create_vector_store_file_and_poll( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] + :keyword content_type: Body Parameter content-type. Defaults to 'application/json'. + :paramtype content_type: str :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str - :keyword data_sources: Azure asset ID. Default value is None. - :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword data_source: Azure asset ID. Default value is None. + :paramtype data_source: ~azure.ai.projects.models.VectorStoreDataSource :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -2983,17 +3015,34 @@ async def create_vector_store_file_and_poll( :rtype: ~azure.ai.projects.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - if body is None: + + if body is not _Unset: + if isinstance(body, dict): + vector_store_file = await super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type or "application/json", + **kwargs, + ) + elif isinstance(body, io.IOBase): + vector_store_file = await super().create_vector_store_file( + vector_store_id=vector_store_id, + body=body, + content_type=content_type, + **kwargs, + ) + else: + raise ValueError( + "Invalid type for 'body'. Must be a dict (JSON) or file-like object (IO[bytes])." + ) + else: vector_store_file = await super().create_vector_store_file( vector_store_id=vector_store_id, file_id=file_id, - data_sources=data_sources, + data_source=data_source, chunking_strategy=chunking_strategy, **kwargs, ) - else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file = await super().create_vector_store_file(body=body, content_type=content_type, **kwargs) while vector_store_file.status == "in_progress": time.sleep(sleep_interval) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index b080fc26de1a..cd024ef71f5d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -778,11 +778,23 @@ def execute(self, tool_call: Any): class OpenApiTool(Tool[OpenApiToolDefinition]): """ - A tool that retrieves information using an OpenAPI spec. + A tool that retrieves information using OpenAPI specs. + Initialized with an initial API definition (name, description, spec, auth), + this class also supports adding and removing additional API definitions dynamically. 
""" def __init__(self, name: str, description: str, spec: Any, auth: OpenApiAuthDetails): - self._definitions = [ + """ + Constructor initializes the tool with a primary API definition. + + :param name: The name of the API. + :param description: The API description. + :param spec: The API specification. + :param auth: Authentication details for the API. + :type auth: OpenApiAuthDetails + """ + self._default_auth = auth + self._definitions: List[OpenApiToolDefinition] = [ OpenApiToolDefinition( openapi=OpenApiFunctionDefinition(name=name, description=description, spec=spec, auth=auth) ) @@ -791,13 +803,67 @@ def __init__(self, name: str, description: str, spec: Any, auth: OpenApiAuthDeta @property def definitions(self) -> List[OpenApiToolDefinition]: """ - Get the OpenApi tool definitions. + Get the list of all API definitions for the tool. - :return: A list of tool definitions. + :return: A list of OpenAPI tool definitions. :rtype: List[ToolDefinition] """ return self._definitions + def add_definition( + self, + name: str, + description: str, + spec: Any, + auth: Optional[OpenApiAuthDetails] = None + ) -> None: + """ + Adds a new API definition dynamically. + Raises a ValueError if a definition with the same name already exists. + + :param name: The name of the API. + :type name: str + :param description: The description of the API. + :type description: str + :param spec: The API specification. + :type spec: Any + :param auth: Optional authentication details for this particular API definition. + If not provided, the tool's default authentication details will be used. + :type auth: Optional[OpenApiAuthDetails] + :raises ValueError: If a definition with the same name exists. + """ + # Check if a definition with the same name exists. 
+ if any(definition.openapi.name == name for definition in self._definitions): + raise ValueError(f"Definition '{name}' already exists and cannot be added again.") + + # Use provided auth if specified, otherwise use default + auth_to_use = auth if auth is not None else self._default_auth + + new_definition = OpenApiToolDefinition( + openapi=OpenApiFunctionDefinition( + name=name, + description=description, + spec=spec, + auth=auth_to_use + ) + ) + self._definitions.append(new_definition) + + def remove_definition(self, name: str) -> None: + """ + Removes an API definition based on its name. + + :param name: The name of the API definition to remove. + :type name: str + :raises ValueError: If the definition with the specified name does not exist. + """ + for definition in self._definitions: + if definition.openapi.name == name: + self._definitions.remove(definition) + logging.info("Definition '%s' removed. Total definitions: %d.", name, len(self._definitions)) + return + raise ValueError(f"Definition with the name '{name}' does not exist.") + @property def resources(self) -> ToolResources: """ @@ -808,11 +874,12 @@ def resources(self) -> ToolResources: """ return ToolResources() - def execute(self, tool_call: Any): + def execute(self, tool_call: Any) -> None: """ OpenApiTool does not execute client-side. :param Any tool_call: The tool call to execute. 
+ :type tool_call: Any """ diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index d23a02f7bbfe..0e392c5d0ea5 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -2525,7 +2525,7 @@ def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: _mo if toolset: tool_outputs = toolset.execute_tool_calls(tool_calls) else: - logger.warning("Toolset is not available in the client.") + logger.debug("Toolset is not available in the client.") return logger.info("Tool outputs: %s", tool_outputs) @@ -2780,8 +2780,8 @@ def create_vector_store_and_poll( *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, - data_sources: Optional[List[_models.VectorStoreDataSource]] = None, name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -2955,7 +2955,7 @@ def create_vector_store_file_batch_and_poll( vector_store_id: str, *, file_ids: Optional[List[str]] = None, - store_configuration: Optional[_models.VectorStoreConfiguration] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, content_type: str = "application/json", chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, @@ -2967,9 +2967,8 @@ def create_vector_store_file_batch_and_poll( :type vector_store_id: str :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] - :keyword store_configuration: The vector store configuration, used when vector store is created - from Azure asset ID. Default value is None. 
- :paramtype store_configuration:~azure.ai.projects.VectorStorageConfiguration + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -3033,7 +3032,7 @@ def create_vector_store_file_batch_and_poll( :keyword file_ids: List of file identifiers. Required. :paramtype file_ids: list[str] :keyword data_sources: List of Azure assets. Default value is None. - :paramtype data_sources: list[~azure.ai.client.project.VectorStoreDataSource] + :paramtype data_sources: list[~azure.ai.client.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest diff --git a/sdk/ai/azure-ai-projects/samples/agents/countries.json b/sdk/ai/azure-ai-projects/samples/agents/countries.json new file mode 100644 index 000000000000..58d3df70d28d --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/countries.json @@ -0,0 +1,46 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "RestCountries.NET API", + "description": "Web API version 3.1 for managing country items, based on previous implementations from restcountries.eu and restcountries.com.", + "version": "v3.1" + }, + "servers": [ + { "url": "https://restcountries.net" } + ], + "auth": [], + "paths": { + "/v3.1/currency": { + "get": { + "description": "Search by currency.", + "operationId": "LookupCountryByCurrency", + "parameters": [ + { + "name": "currency", + "in": "query", + "description": "The currency to search for.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Success", + "content": { + "text/plain": { + "schema": { + 
"type": "string" + } + } + } + } + } + } + } + }, + "components": { + "schemes": {} + } + } \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py index 48cecd3a8833..f2c742f577db 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_openapi.py @@ -29,6 +29,7 @@ from azure.identity import DefaultAzureCredential from azure.ai.projects.models import OpenApiTool, OpenApiAnonymousAuthDetails + project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], @@ -36,15 +37,19 @@ # [START create_agent_with_openapi] with open("./weather_openapi.json", "r") as f: - openapi_spec = jsonref.loads(f.read()) + openapi_weather = jsonref.loads(f.read()) + +with open("./countries.json", "r") as f: + openapi_countries = jsonref.loads(f.read()) # Create Auth object for the OpenApiTool (note that connection or managed identity auth setup requires additional setup in Azure) auth = OpenApiAnonymousAuthDetails() # Initialize agent OpenApi tool using the read in OpenAPI spec -openapi = OpenApiTool( - name="get_weather", spec=openapi_spec, description="Retrieve weather information for a location", auth=auth +openapi_tool = OpenApiTool( + name="get_weather", spec=openapi_weather, description="Retrieve weather information for a location", auth=auth ) +openapi_tool.add_definition(name="get_countries", spec=openapi_countries, description="Retrieve a list of countries", auth=auth) # Create agent with OpenApi tool and process assistant run with project_client: @@ -52,7 +57,7 @@ model=os.environ["MODEL_DEPLOYMENT_NAME"], name="my-assistant", instructions="You are a helpful assistant", - tools=openapi.definitions, + tools=openapi_tool.definitions, ) # [END create_agent_with_openapi] @@ -67,7 +72,7 @@ message = 
project_client.agents.create_message( thread_id=thread.id, role="user", - content="What's the weather in Seattle?", + content="What's the weather in Seattle and What is the name and population of the country that uses currency with abbreviation THB?", ) print(f"Created message, ID: {message.id}") @@ -78,6 +83,27 @@ if run.status == "failed": print(f"Run failed: {run.last_error}") + run_steps = project_client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + + # Loop through each step + for step in run_steps.data: + print(f"Step {step['id']} status: {step['status']}") + + # Check if there are tool calls in the step details + step_details = step.get('step_details', {}) + tool_calls = step_details.get('tool_calls', []) + + if tool_calls: + print(" Tool calls:") + for call in tool_calls: + print(f" Tool Call ID: {call.get('id')}") + print(f" Type: {call.get('type')}") + + function_details = call.get('function', {}) + if function_details: + print(f" Function name: {function_details.get('name')}") + print() # add an extra newline between steps + # Delete the assistant when done project_client.agents.delete_agent(agent.id) print("Deleted agent") From ce0642441d6c256b6b89d0dc9aba5ad665389fa6 Mon Sep 17 00:00:00 2001 From: Glenn Harper <64209257+glharper@users.noreply.github.com> Date: Thu, 6 Feb 2025 18:37:59 -0500 Subject: [PATCH 14/16] [AI] [Agents] Add sharepoint tool sample (#39487) * [AI] [Agents] Add sharepoint tool sample * Update sample_agents_sharepoint.py * Update sample_agents_sharepoint.py --- .../agents/sample_agents_sharepoint.py | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py new file mode 100644 index 000000000000..e95c84512670 --- /dev/null +++ 
b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py
@@ -0,0 +1,80 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_sharepoint.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the
+    Sharepoint tool from the Azure Agents service using a synchronous client.
+    The sharepoint tool is currently available only to whitelisted customers.
+    For access and onboarding instructions, please contact azureagents-preview@microsoft.com.
+
+USAGE:
+    python sample_agents_sharepoint.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import SharepointTool
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Initialize Sharepoint tool with connection id +sharepoint = SharepointTool(connection_id="sharepoint_connection_name") + +# Create agent with Sharepoint tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model=os.environ["MODEL_NAME"], + name="my-assistant", + instructions="You are a helpful assistant", + tools=sharepoint.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, summarize the key points of the ", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") From e281d525435a73433e476d5786a8865c4174442a Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 7 Feb 2025 14:02:07 -0800 Subject: [PATCH 15/16] Add the readme section on how to deploy function and record tests. 
(#39589) * Record the tests for functions and write how to deploy azure function in the readme. * Describe creation of Azure function for python * Fix * Fix typos * Fix --- sdk/ai/azure-ai-projects/README.md | 102 +++++++++++++++++- sdk/ai/azure-ai-projects/assets.json | 2 +- .../tests/agents/test_agents_client.py | 9 +- .../tests/agents/test_agents_client_async.py | 7 +- 4 files changed, 111 insertions(+), 9 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index ba5c14afcc05..648f99c882fb 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -535,7 +535,7 @@ agent = await project_client.agents.create_agent( #### Create Agent With Azure Function Call -The agent can handle Azure Function calls on the service side and return the result of the call. To use the function we need to create the `AzureFunctionTool`, which contains the input and output queues of azure function and the description of input parameters. Please note that in the prompt we are asking the model to invoke queue when the specific question ("What would foo say?") is being asked. +The agent can handle Azure Function calls on the service side and return the result of the call. To use the function, we need to create the `AzureFunctionTool`, which contains the input and output queues of Azure function and the description of input parameters. Please note that in the prompt we are asking the model to invoke queue when the specific question ("What would foo say?") is being asked. See below for the instructions on function deployment. @@ -571,6 +571,106 @@ print(f"Created agent, agent ID: {agent.id}") +To make a function call we need to create and deploy the Azure function. In the code snippet below, we have an example of function on python which can be used by the code above. 
+ +```python +import azure.functions as func +import json + +from urllib.parse import urlparse +from azure.identity import DefaultAzureCredential +from azure.storage.queue import ( + QueueClient, + BinaryBase64EncodePolicy, + BinaryBase64DecodePolicy +) + +app = func.FunctionApp() + + +@app.function_name(name="Foo") +@app.queue_trigger( + arg_name="arguments", + queue_name="azure-function-foo-input", + connection="AzureWebJobsStorage") +def foo(arguments: func.QueueMessage) -> None: + """ + The function, answering question. + + :param arguments: The arguments, containing json serialized request. + """ + parsed_args = json.loads(arguments.get_body().decode('utf-8')) + queue_url = urlparse(parsed_args['outputqueueuri']) + + queue_client = QueueClient( + f"{queue_url.scheme}://{queue_url.netloc}", + queue_name=queue_url.path[1:], + credential=DefaultAzureCredential(), + message_encode_policy=BinaryBase64EncodePolicy(), + message_decode_policy=BinaryBase64DecodePolicy() + ) + + response = { + "Value": "Bar", + "CorrelationId": parsed_args['CorrelationId'] + } + queue_client.send_message(json.dumps(response).encode('utf-8')) + +``` + +In this code we define function input and output class: `Arguments` and `Response` respectively. These two data classes will be serialized in JSON. It is important that these both contain field `CorrelationId`, which is the same between input and output. + +In our example the function will be stored in the storage account, created with the AI hub. For that we need to allow key access to that storage. In Azure portal go to Storage account > Settings > Configuration and set "Allow storage account key access" to Enabled. If it is not done, the error will be displayed "The remote server returned an error: (403) Forbidden." +Before creation of the function we will need to get the python version using command `python --version`. We recommend to use python version 3.11 or above. 
We will need only two major digits in the next command, which deploys function and installs azure-cli: + +```shell +pip install -U azure-cli +az login +az functionapp create --resource-group your-resource-group --consumption-plan-location region --runtime python --runtime-version 3.11 --functions-version 4 --name function_name --os-type linux --storage-account storage_account_already_present_in_resource_group --app-insights existing_or_new_application_insights_name +``` + +This function writes data to the output queue and hence needs to be authenticated to Azure, so we will need to assign the function system identity and provide it `Storage Queue Data Contributor`. To do that in Azure portal select the function, located in `your-resource-group` resource group and in Settings>Identity, switch it on and click Save. After that assign the `Storage Queue Data Contributor` permission on storage account used by our function (`storage_account_already_present_in_resource_group` in the script above) for just assigned System Managed identity. +**Note:** in python we need to provide the explicit queue connection. It is defined in the line `connection="AzureWebJobsStorage".`. `AzureWebJobsStorage` is a setting, which can be viewed in `function_name` Settings>Environment variables>AzureWebJobsStorage and will look like DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=storage_account_already_present_in_resource_group;AccountKey=xxxxx. Another option is to provide the Managed identity. In this case we will need to set `connection` to prefix, present in three environment variables. For example, `connection=STORAGE_CONNECTION` and the variables will be: `STORAGE_CONNECTION__clientId` (managed entity UUID), `STORAGE_CONNECTION__credential` ("managedidentity") and `STORAGE_CONNECTION__queueServiceUri` (the URI of storage account, for example https://storage_account_already_present_in_resource_group.queue.core.windows.net). 
+ +Now we will create the function itself. Install [.NET](https://dotnet.microsoft.com/download) and [Core Tools](https://go.microsoft.com/fwlink/?linkid=2174087) and create the function project using next commands. +``` +func init FunctionProj --worker-runtime python +cd FunctionProj +# Use the next line in cmd +echo.azure-identity>> requirements.txt +echo.azure-storage-queue>> requirements.txt +# Use the next line in PowerShell +echo "azure-identity" >> requirements.txt +echo "azure-storage-queue" >> requirements.txt +``` +If you are working in virtual environment and the virtual environment folder is located in FunctionProj, make sure, the name of this folder is added to `.funcignore` as all the directory content will be uploaded to Azure. + +Rename function_app.py to foo.py and replace the content with the code above. To deploy the function run the command from dotnet project folder: + +``` +func azure functionapp publish function_name +``` + +In the `storage_account_already_present_in_resource_group` select the `Queue service` and create two queues: `azure-function-foo-input` and `azure-function-tool-output`. Note that the same queues are used in our sample. To check that the function is working, place the next message into the `azure-function-foo-input` and replace `storage_account_already_present_in_resource_group` by the actual resource group name, or just copy the output queue address. Note in python `outputqueueuri` is written in all lower case letters. +```json +{ + "outputqueueuri": "https://storage_account_already_present_in_resource_group.queue.core.windows.net/azure-function-tool-output", + "CorrelationId": "42" +} +``` + +Next, we will monitor the output queue or the message. You should receive the next message. +```json +{ + "Value": "Bar", + "CorrelationId": "42" +} +``` +Please note that the input `CorrelationId` is the same as output. 
+*Hint:* Place multiple messages to input queue and keep second internet browser window with the output queue open and hit the refresh button on the portal user interface, so that you will not miss the message. If the message instead went to `azure-function-foo-input-poison` queue, the function completed with error, please check your setup. +After we have tested the function and made sure it works, please make sure that the Azure AI Project have the following roles for the storage account: `Storage Account Contributor`, `Storage Blob Data Contributor`, `Storage File Data Privileged Contributor`, `Storage Queue Data Contributor` and `Storage Table Data Contributor`. Now the function is ready to be used by the agent. + + #### Create Agent With Logic Apps Logic Apps allow HTTP requests to trigger actions. For more information, refer to the guide [Logic App Workflows for Function Calling](https://learn.microsoft.com/azure/ai-services/openai/how-to/assistants-logic-apps#create-logic-apps-workflows-for-function-calling). 
diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json index 2483d0b9f212..5f3548e3435f 100644 --- a/sdk/ai/azure-ai-projects/assets.json +++ b/sdk/ai/azure-ai-projects/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-projects", - "Tag": "python/ai/azure-ai-projects_55ba14b3a7" + "Tag": "python/ai/azure-ai-projects_85b26dc606" } diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 6fed0a59ba6b..6862bad9744b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -3022,10 +3022,11 @@ def test_code_interpreter_and_save_file(self, **kwargs): assert output_file_exist @agentClientPreparer() - @pytest.mark.skip("New test, will need recording in future.") @recorded_by_proxy def test_azure_function_call(self, **kwargs): """Test calling Azure functions.""" + # Note: This test was recorded in westus region as for now + # 2025-02-05 it is not supported in test region (East US 2) # create client storage_queue = kwargs["azure_ai_projects_agents_tests_storage_queue"] with self.create_client(**kwargs) as client: @@ -3079,10 +3080,10 @@ def test_azure_function_call(self, **kwargs): assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = client.agents.get_messages(thread_id=thread.id) - assert len(messages.text_messages), "No messages were received." + messages = client.agents.list_messages(thread_id=thread.id) + assert len(messages.text_messages) > 1, "No messages were received from agent." - # Chech that we have function response in at least one message. + # Check that we have function response in at least one message. 
assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) # Delete the agent once done diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index 7aa41ac4a53b..a1dcb52ce881 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -2693,10 +2693,11 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): await ai_client.close() @agentClientPreparer() - @pytest.mark.skip("New test, will need recording in future.") @recorded_by_proxy_async async def test_azure_function_call(self, **kwargs): """Test calling Azure functions.""" + # Note: This test was recorded in westus region as for now + # 2025-02-05 it is not supported in test region (East US 2) # create client storage_queue = kwargs["azure_ai_projects_agents_tests_storage_queue"] async with self.create_client(**kwargs) as client: @@ -2750,8 +2751,8 @@ async def test_azure_function_call(self, **kwargs): assert run.status == RunStatus.COMPLETED, f"The run is in {run.status} state." # Get messages from the thread - messages = await client.agents.get_messages(thread_id=thread.id) - assert len(messages.text_messages), "No messages were received." + messages = await client.agents.list_messages(thread_id=thread.id) + assert len(messages.text_messages) > 1, "No messages were received from agent." # Chech that we have function response in at least one message. 
assert any("bar" in msg.text.value.lower() for msg in messages.text_messages) From 24d2b6b66a832efba0fbe7ad4b91b6d958f9df7d Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Mon, 10 Feb 2025 13:32:34 -0600 Subject: [PATCH 16/16] M hietala/adding function tracing conveniency (#39368) * adding function tracing conveniency * updated documentation * reformta * update * change to restart ci * updating implementation * updating samples * adding unit tests and fixes * removing renamed file * adding unit test files * adding safeguard for the decorator to be no-op when otel not available * formatting changes done by black tool * pylint fixes * mypy fixes * fix a few tool errors * adding exception qualname to exception trace * updating change log --------- Co-authored-by: Marko Hietala --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 + .../azure/ai/projects/telemetry/__init__.py | 13 + .../ai/projects/telemetry/_trace_function.py | 204 ++++++++++ .../async_samples/user_async_functions.py | 31 +- ...ts_functions_with_azure_monitor_tracing.py | 5 +- ...e_agents_functions_with_console_tracing.py | 5 +- .../tests/telemetry/gen_ai_trace_verifier.py | 70 +++- .../test_function_trace_decorator.py | 368 ++++++++++++++++++ .../test_function_trace_decorator_async.py | 50 +++ 9 files changed, 734 insertions(+), 14 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py create mode 100644 sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator.py create mode 100644 sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator_async.py diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index cd294dc42e03..363382a6f357 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features added +* Added trace_function decorator which can be used to trace functions + ### Bugs Fixed ### 
Breaking Changes diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py index d55ccad1f573..a5e9e67bf233 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py @@ -1 +1,14 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._trace_function import trace_function + +__all__ = [ + "trace_function", +] __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py new file mode 100644 index 000000000000..1890a6f1e88d --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/_trace_function.py @@ -0,0 +1,204 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import functools +import asyncio +from typing import Any, Callable, Optional, Dict + +try: + # pylint: disable = no-name-in-module + from opentelemetry import trace as opentelemetry_trace + + tracer = opentelemetry_trace.get_tracer(__name__) + _tracing_library_available = True +except ModuleNotFoundError: + _tracing_library_available = False + +if _tracing_library_available: + + def trace_function(span_name: Optional[str] = None): + """ + A decorator for tracing function calls using OpenTelemetry. 
+ + This decorator handles various data types for function parameters and return values, + and records them as attributes in the trace span. The supported data types include: + - Basic data types: str, int, float, bool + - Collections: list, dict, tuple, set + + Special handling for collections: + - If a collection (list, dict, tuple, set) contains nested collections, the entire collection + is converted to a string before being recorded as an attribute. + - Sets and dictionaries are always converted to strings to ensure compatibility with span attributes. + + Object types are omitted, and the corresponding parameter is not traced. + + :param span_name: The name of the span. If not provided, the function name is used. + :type span_name: Optional[str] + :return: The decorated function with tracing enabled. + :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + @functools.wraps(func) + async def async_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for asynchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated asynchronous function. 
+ :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = await func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + @functools.wraps(func) + def sync_wrapper(*args: Any, **kwargs: Any) -> Any: + """ + Wrapper function for synchronous functions. + + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: The result of the decorated synchronous function. 
+ :rtype: Any + """ + name = span_name if span_name else func.__name__ + with tracer.start_as_current_span(name) as span: + try: + # Sanitize parameters and set them as attributes + sanitized_params = sanitize_parameters(func, *args, **kwargs) + span.set_attributes(sanitized_params) + result = func(*args, **kwargs) + sanitized_result = sanitize_for_attributes(result) + if sanitized_result is not None: + if isinstance(sanitized_result, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_result): + sanitized_result = str(sanitized_result) + span.set_attribute("code.function.return.value", sanitized_result) # type: ignore + return result + except Exception as e: + span.record_exception(e) + span.set_attribute("error.type", e.__class__.__qualname__) # type: ignore + raise + + # Determine if the function is async + if asyncio.iscoroutinefunction(func): + return async_wrapper + return sync_wrapper + + return decorator + +else: + # Define a no-op decorator if OpenTelemetry is not available + def trace_function(span_name: Optional[str] = None): # pylint: disable=unused-argument + """ + A no-op decorator for tracing function calls when OpenTelemetry is not available. + + :param span_name: Not used in this version. + :type span_name: Optional[str] + :return: The original function. + :rtype: Callable + """ + + def decorator(func: Callable) -> Callable: + return func + + return decorator + + +def sanitize_parameters(func, *args, **kwargs) -> Dict[str, Any]: + """ + Sanitize function parameters to include only built-in data types. + + :param func: The function being decorated. + :type func: Callable + :param args: Positional arguments passed to the function. + :type args: Tuple[Any] + :return: A dictionary of sanitized parameters. 
+ :rtype: Dict[str, Any] + """ + import inspect + + params = inspect.signature(func).parameters + sanitized_params = {} + + for i, (name, param) in enumerate(params.items()): + if param.default == inspect.Parameter.empty and i < len(args): + value = args[i] + else: + value = kwargs.get(name, param.default) + + sanitized_value = sanitize_for_attributes(value) + # Check if the collection has nested collections + if isinstance(sanitized_value, (list, dict, tuple, set)): + if any(isinstance(i, (list, dict, tuple, set)) for i in sanitized_value): + sanitized_value = str(sanitized_value) + if sanitized_value is not None: + sanitized_params["code.function.parameter." + name] = sanitized_value + + return sanitized_params + + +# pylint: disable=R0911 +def sanitize_for_attributes(value: Any, is_recursive: bool = False) -> Any: + """ + Sanitize a value to be used as an attribute. + + :param value: The value to sanitize. + :type value: Any + :param is_recursive: Indicates if the function is being called recursively. Default is False. + :type is_recursive: bool + :return: The sanitized value or None if the value is not a supported type. 
+ :rtype: Any + """ + if isinstance(value, (str, int, float, bool)): + return value + if isinstance(value, list): + return [ + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ] + if isinstance(value, dict): + retval = { + k: sanitize_for_attributes(v, True) + for k, v in value.items() + if isinstance(v, (str, int, float, bool, list, dict, tuple, set)) + } + # dict to compatible with span attribute, so return it as a string + if is_recursive: + return retval + return str(retval) + if isinstance(value, tuple): + return tuple( + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + ) + if isinstance(value, set): + retval_set = { + sanitize_for_attributes(item, True) + for item in value + if isinstance(item, (str, int, float, bool, list, dict, tuple, set)) + } + if is_recursive: + return retval_set + return str(retval_set) + return None diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py index 35faeb1806db..057d6a07fd4b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py @@ -6,7 +6,10 @@ import asyncio import os import sys -from typing import Any, Callable, Set +import json +import datetime +from typing import Any, Callable, Set, Optional +from azure.ai.projects.telemetry import trace_function # Add parent directory to sys.path to import user_functions @@ -31,9 +34,33 @@ async def send_email_async(recipient: str, subject: str, body: str) -> str: return send_email(recipient, subject, body) +# The trace_func decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. 
Note that this will trace the function parameters and their values. +@trace_function() +async def fetch_current_datetime_async(format: Optional[str] = None) -> str: + """ + Get the current time as a JSON string, optionally formatted. + + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. + :return: The current time in JSON format. + :rtype: str + """ + await asyncio.sleep(1) + current_time = datetime.datetime.now() + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) + return time_json + + # Statically defined user functions for fast reference with send_email as async but the rest as sync user_async_functions: Set[Callable[..., Any]] = { - fetch_current_datetime, + fetch_current_datetime_async, fetch_weather, send_email_async, } diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index 0859f3bf2cb8..a08e380d6ba2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -28,6 +28,7 @@ import os, time, json from azure.ai.projects import AIProjectClient +from azure.ai.projects.telemetry import trace_function from azure.identity import DefaultAzureCredential from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from opentelemetry import trace @@ -49,9 +50,9 @@ tracer = trace.get_tracer(__name__) -# The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes +# The trace_func decorator will trace the function call 
and enable adding additional attributes # to the span in the function implementation. Note that this will trace the function parameters and their values. -@tracer.start_as_current_span("fetch_weather") # type: ignore +@trace_function() def fetch_weather(location: str) -> str: """ Fetches the weather information for the specified location. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index 47fd2d413a90..e0c410f1a242 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -35,6 +35,7 @@ from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput +from azure.ai.projects.telemetry import trace_function from opentelemetry import trace project_client = AIProjectClient.from_connection_string( @@ -50,9 +51,9 @@ tracer = trace.get_tracer(__name__) -# The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes +# The trace_func decorator will trace the function call and enable adding additional attributes # to the span in the function implementation. Note that this will trace the function parameters and their values. -@tracer.start_as_current_span("fetch_weather") # type: ignore +@trace_function() def fetch_weather(location: str) -> str: """ Fetches the weather information for the specified location. 
diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/gen_ai_trace_verifier.py b/sdk/ai/azure-ai-projects/tests/telemetry/gen_ai_trace_verifier.py index 067bcb2b7abb..47e423ae29eb 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/gen_ai_trace_verifier.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/gen_ai_trace_verifier.py @@ -4,6 +4,7 @@ # ------------------------------------ import numbers import json +from typing import List from opentelemetry.sdk.trace import Span @@ -25,9 +26,9 @@ def check_span_attributes(self, span, attributes): if span.attributes[attribute_name] != attribute_value: print( "Attribute value list " - + span.attributes[attribute_name] + + str(span.attributes[attribute_name]) + " does not match with " - + attribute_value + + str(attribute_value) ) return False elif isinstance(attribute_value, tuple): @@ -35,28 +36,81 @@ def check_span_attributes(self, span, attributes): if span.attributes[attribute_name] != attribute_value: print( "Attribute value tuple " - + span.attributes[attribute_name] + + str(span.attributes[attribute_name]) + " does not match with " - + attribute_value + + str(attribute_value) ) return False else: # Check if the attribute value matches the provided value if attribute_value == "+": if not isinstance(span.attributes[attribute_name], numbers.Number): - print("Attribute value " + span.attributes[attribute_name] + " is not a number") + print("Attribute value " + str(span.attributes[attribute_name]) + " is not a number") return False if span.attributes[attribute_name] < 0: - print("Attribute value " + span.attributes[attribute_name] + " is negative") + print("Attribute value " + str(span.attributes[attribute_name]) + " is negative") return False elif attribute_value != "" and span.attributes[attribute_name] != attribute_value: print( - "Attribute value " + span.attributes[attribute_name] + " does not match with " + attribute_value + "Attribute value " + + str(span.attributes[attribute_name]) + + " does not match with " + 
+ str(attribute_value) ) return False # Check if the attribute value in the span is not empty when the provided value is "" elif attribute_value == "" and not span.attributes[attribute_name]: - print("Excpected non-empty attribute value") + print("Expected non-empty attribute value") + return False + + return True + + def check_decorator_span_attributes(self, span: Span, attributes: List[tuple]) -> bool: + # Convert the list of tuples to a dictionary for easier lookup + attribute_dict = dict(attributes) + + # Ensure all required attributes are present in the span + for attribute_name in attribute_dict.keys(): + if attribute_name not in span.attributes: + print("Required attribute name " + attribute_name + " not found in span attributes") + return False + + for attribute_name in span.attributes.keys(): + # Check if the attribute name exists in the input attributes + if attribute_name not in attribute_dict: + print("Attribute name " + attribute_name + " not in attribute dictionary") + return False + + attribute_value = attribute_dict[attribute_name] + span_value = span.attributes[attribute_name] + + if isinstance(attribute_value, (list, tuple)): + # Convert both to lists for comparison + if list(span_value) != list(attribute_value): + print( + "Attribute value list/tuple " + str(span_value) + " does not match with " + str(attribute_value) + ) + return False + elif isinstance(attribute_value, dict): + # Check if both are dictionaries and compare them + if not isinstance(span_value, dict) or span_value != attribute_value: + print("Attribute value dict " + str(span_value) + " does not match with " + str(attribute_value)) + return False + else: + # Check if the attribute value matches the provided value + if attribute_value == "+": + if not isinstance(span_value, numbers.Number): + print("Attribute value " + str(span_value) + " is not a number") + return False + if span_value < 0: + print("Attribute value " + str(span_value) + " is negative") + return False + elif 
attribute_value != "" and span_value != attribute_value: + print("Attribute value " + str(span_value) + " does not match with " + str(attribute_value)) + return False + # Check if the attribute value in the span is not empty when the provided value is "" + elif attribute_value == "" and not span_value: + print("Expected non-empty attribute value") return False return True diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator.py new file mode 100644 index 000000000000..ec6143896dec --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator.py @@ -0,0 +1,368 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from typing import Any, Callable, Tuple, Optional, Dict, List, Set +from opentelemetry import trace +from opentelemetry.sdk.trace import Span, TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from memory_trace_exporter import MemoryTraceExporter +from gen_ai_trace_verifier import GenAiTraceVerifier +from azure.ai.projects.telemetry import trace_function +from pytest import raises as pytest_raises + + +class EmptyClass: + pass + + +# Dummy helper functions with decorators +@trace_function("basic_datatypes_positional") +def basic_datatypes_positional(a: int, b: str, c: bool) -> str: + return f"{a} - {b} - {c}" + + +@trace_function("basic_datatypes_named") +def basic_datatypes_named(a: int, b: str, c: bool) -> str: + return f"{a} - {b} - {c}" + + +@trace_function("no_arguments_no_return") +def no_arguments_no_return() -> None: + pass + + +@trace_function("complex_datatypes_positional") +def complex_datatypes_positional(a: List[int], b: Dict[str, int], c: Tuple[int, int]) -> str: + print(f"Type of b: {type(b)}") # Print the type of the second argument + return f"{a} - {b} - {c}" + + 
+@trace_function("complex_datatypes_named") +def complex_datatypes_named(a: List[int], b: Dict[str, int], c: Tuple[int, int]) -> str: + return f"{a} - {b} - {c}" + + +@trace_function("none_argument") +def none_argument(a: Optional[int]) -> str: + return f"{a}" + + +@trace_function("none_return_value") +def none_return_value() -> None: + return None + + +@trace_function("list_argument_return_value") +def list_argument_return_value(a: List[int]) -> List[int]: + return a + + +@trace_function("dict_argument_return_value") +def dict_argument_return_value(a: Dict[str, int]) -> Dict[str, int]: + return a + + +@trace_function("tuple_argument_return_value") +def tuple_argument_return_value(a: Tuple[int, int]) -> Tuple[int, int]: + return a + + +@trace_function("set_argument_return_value") +def set_argument_return_value(a: Set[int]) -> Set[int]: + return a + + +@trace_function("raise_exception") +def raise_exception() -> None: + raise ValueError("Test exception") + + +@trace_function() +def empty_class_argument(a: EmptyClass) -> EmptyClass: + return a + + +@trace_function() +def empty_class_return_value() -> EmptyClass: + return EmptyClass() + + +@trace_function() +def list_with_empty_class(a: list) -> list: + return a + + +@trace_function() +def dict_with_empty_class(a: dict) -> dict: + return a + + +@trace_function() +def tuple_with_empty_class(a: tuple) -> tuple: + return a + + +@trace_function() +def set_with_empty_class(a: set) -> set: + return a + + +@trace_function() +def nested_collections(a: list) -> list: + return a + + +# Pytest unit tests +class TestFunctionTraceDecorator: + def setup_memory_trace_exporter(self) -> MemoryTraceExporter: + trace.set_tracer_provider(TracerProvider()) + tracer = trace.get_tracer(__name__) + memoryExporter = MemoryTraceExporter() + span_processor = SimpleSpanProcessor(memoryExporter) + trace.get_tracer_provider().add_span_processor(span_processor) + return span_processor, memoryExporter + + def 
test_basic_datatypes_positional_arguments(self): + processor, exporter = self.setup_memory_trace_exporter() + result = basic_datatypes_positional(1, "test", True) + assert result == "1 - test - True" + processor.force_flush() + spans = exporter.get_spans_by_name("basic_datatypes_positional") + assert len(spans) == 1 + span = spans[0] + + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [ + ("code.function.parameter.a", 1), + ("code.function.parameter.b", "test"), + ("code.function.parameter.c", True), + ("code.function.return.value", "1 - test - True"), + ], + ) + + def test_basic_datatypes_named_arguments(self): + processor, exporter = self.setup_memory_trace_exporter() + result = basic_datatypes_named(b="test", a=1, c=True) + assert result == "1 - test - True" + processor.force_flush() + spans = exporter.get_spans_by_name("basic_datatypes_named") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [ + ("code.function.parameter.a", 1), + ("code.function.parameter.b", "test"), + ("code.function.parameter.c", True), + ("code.function.return.value", "1 - test - True"), + ], + ) + + def test_no_arguments_no_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = no_arguments_no_return() + assert result is None + processor.force_flush() + spans = exporter.get_spans_by_name("no_arguments_no_return") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes(span, []) + + def test_complex_datatypes_positional_arguments(self): + processor, exporter = self.setup_memory_trace_exporter() + result = complex_datatypes_positional([1, 2], {"key": 3}, (4, 5)) + assert result == "[1, 2] - {'key': 3} - (4, 5)" + processor.force_flush() + spans = exporter.get_spans_by_name("complex_datatypes_positional") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [ 
+ ("code.function.parameter.a", [1, 2]), + ("code.function.parameter.b", "{'key': 3}"), + ("code.function.parameter.c", (4, 5)), + ("code.function.return.value", "[1, 2] - {'key': 3} - (4, 5)"), + ], + ) + + def test_complex_datatypes_named_arguments(self): + processor, exporter = self.setup_memory_trace_exporter() + result = complex_datatypes_named(a=[1, 2], b={"key": 3}, c=(4, 5)) + assert result == "[1, 2] - {'key': 3} - (4, 5)" + processor.force_flush() + spans = exporter.get_spans_by_name("complex_datatypes_named") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [ + ("code.function.parameter.a", [1, 2]), + ("code.function.parameter.b", "{'key': 3}"), + ("code.function.parameter.c", (4, 5)), + ("code.function.return.value", "[1, 2] - {'key': 3} - (4, 5)"), + ], + ) + + def test_none_argument(self): + processor, exporter = self.setup_memory_trace_exporter() + result = none_argument(None) + assert result == "None" + processor.force_flush() + spans = exporter.get_spans_by_name("none_argument") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes(span, [("code.function.return.value", "None")]) + + def test_none_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = none_return_value() + assert result is None + processor.force_flush() + spans = exporter.get_spans_by_name("none_return_value") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes(span, []) + + def test_list_argument_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = list_argument_return_value([1, 2, 3]) + assert result == [1, 2, 3] + processor.force_flush() + spans = exporter.get_spans_by_name("list_argument_return_value") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, 
[("code.function.parameter.a", [1, 2, 3]), ("code.function.return.value", [1, 2, 3])] + ) + + def test_dict_argument_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = dict_argument_return_value({"key": 1}) + assert result == {"key": 1} + processor.force_flush() + spans = exporter.get_spans_by_name("dict_argument_return_value") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", "{'key': 1}"), ("code.function.return.value", "{'key': 1}")] + ) + + def test_tuple_argument_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = tuple_argument_return_value((1, 2)) + assert result == (1, 2) + processor.force_flush() + spans = exporter.get_spans_by_name("tuple_argument_return_value") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", (1, 2)), ("code.function.return.value", (1, 2))] + ) + + def test_set_argument_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + result = set_argument_return_value({1, 2, 3}) + assert result == {1, 2, 3} + processor.force_flush() + spans = exporter.get_spans_by_name("set_argument_return_value") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", "{1, 2, 3}"), ("code.function.return.value", "{1, 2, 3}")] + ) + + def test_exception(self): + processor, exporter = self.setup_memory_trace_exporter() + try: + raise_exception() + assert False + except Exception as e: + processor.force_flush() + spans = exporter.get_spans_by_name("raise_exception") + assert len(spans) == 1 + span: Span = spans[0] + assert span.status.is_ok == False + assert span.status.description == "ValueError: Test exception" + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, 
[("error.type", e.__class__.__qualname__)] + ) + + def test_object_argument_and_return_value(self): + processor, exporter = self.setup_memory_trace_exporter() + empty_instance = EmptyClass() + result = empty_class_argument(empty_instance) + assert result == empty_instance + processor.force_flush() + spans = exporter.get_spans_by_name("empty_class_argument") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes(span, []) + + def test_list_with_object(self): + processor, exporter = self.setup_memory_trace_exporter() + empty_instance = EmptyClass() + result = list_with_empty_class([empty_instance, 1, 2, 3]) + assert result == [empty_instance, 1, 2, 3] + processor.force_flush() + spans = exporter.get_spans_by_name("list_with_empty_class") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", [1, 2, 3]), ("code.function.return.value", [1, 2, 3])] + ) + + def test_dict_with_object(self): + processor, exporter = self.setup_memory_trace_exporter() + empty_instance = EmptyClass() + result = dict_with_empty_class({"key1": empty_instance, "key2": 1}) + assert result == {"key1": empty_instance, "key2": 1} + processor.force_flush() + spans = exporter.get_spans_by_name("dict_with_empty_class") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", "{'key2': 1}"), ("code.function.return.value", "{'key2': 1}")] + ) + + def test_tuple_with_object(self): + processor, exporter = self.setup_memory_trace_exporter() + empty_instance = EmptyClass() + result = tuple_with_empty_class((empty_instance, 1, 2, 3)) + assert result == (empty_instance, 1, 2, 3) + processor.force_flush() + spans = exporter.get_spans_by_name("tuple_with_empty_class") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, 
[("code.function.parameter.a", (1, 2, 3)), ("code.function.return.value", (1, 2, 3))] + ) + + def test_set_with_object(self): + processor, exporter = self.setup_memory_trace_exporter() + empty_instance = EmptyClass() + result = set_with_empty_class({empty_instance, 1, 2, 3}) + assert result == {empty_instance, 1, 2, 3} + processor.force_flush() + spans = exporter.get_spans_by_name("set_with_empty_class") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, [("code.function.parameter.a", "{1, 2, 3}"), ("code.function.return.value", "{1, 2, 3}")] + ) + + def test_nested_collections(self): + processor, exporter = self.setup_memory_trace_exporter() + nested_instance = [1, [2, 3], {"key": [4, 5]}, (6, {7, 8})] + result = nested_collections(nested_instance) + assert result == nested_instance + processor.force_flush() + spans = exporter.get_spans_by_name("nested_collections") + assert len(spans) == 1 + span = spans[0] + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [("code.function.parameter.a", str(nested_instance)), ("code.function.return.value", str(nested_instance))], + ) diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator_async.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator_async.py new file mode 100644 index 000000000000..1dd96453fc4b --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_function_trace_decorator_async.py @@ -0,0 +1,50 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import asyncio +from opentelemetry import trace +from opentelemetry.sdk.trace import Span, TracerProvider +from opentelemetry.sdk.trace.export import SimpleSpanProcessor +from memory_trace_exporter import MemoryTraceExporter +from gen_ai_trace_verifier import GenAiTraceVerifier +from azure.ai.projects.telemetry import trace_function +import pytest + + +# Dummy helper functions with decorators +@trace_function("basic_datatypes_positional") +async def basic_datatypes_positional(a: int, b: str, c: bool) -> str: + await asyncio.sleep(1) + return f"{a} - {b} - {c}" + + +# Pytest unit tests +class TestFunctionTraceDecoratorAsync: + def setup_memory_trace_exporter(self) -> MemoryTraceExporter: + trace.set_tracer_provider(TracerProvider()) + tracer = trace.get_tracer(__name__) + memoryExporter = MemoryTraceExporter() + span_processor = SimpleSpanProcessor(memoryExporter) + trace.get_tracer_provider().add_span_processor(span_processor) + return span_processor, memoryExporter + + @pytest.mark.asyncio + async def test_basic_datatypes_positional_arguments(self): + processor, exporter = self.setup_memory_trace_exporter() + result = await basic_datatypes_positional(1, "test", True) + assert result == "1 - test - True" + processor.force_flush() + spans = exporter.get_spans_by_name("basic_datatypes_positional") + assert len(spans) == 1 + span = spans[0] + + assert GenAiTraceVerifier().check_decorator_span_attributes( + span, + [ + ("code.function.parameter.a", 1), + ("code.function.parameter.b", "test"), + ("code.function.parameter.c", True), + ("code.function.return.value", "1 - test - True"), + ], + )