Skip to content

Commit 41cb1e0

Browse files
authored
Copy over prompt operations (#40508)
1 parent 7ce6fbe commit 41cb1e0

9 files changed

+362
-2
lines changed

Diff for: sdk/ai/azure-ai-projects-onedp/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -435,7 +435,7 @@ Operation returned an invalid status 'Unauthorized'
435435

436436
### Logging
437437

438-
The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following:
438+
The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following at the top of your Python script:
439439

440440
```python
441441
import sys

Diff for: sdk/ai/azure-ai-projects-onedp/azure/ai/projects/onedp/_patch.py

+5-1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from azure.core.credentials import AzureKeyCredential, TokenCredential
1111
from ._client import AIProjectClient as AIProjectClientGenerated
1212
from .operations import TelemetryOperations, InferenceOperations, AssistantsOperations
13+
from ._patch_prompts import PromptTemplate
1314

1415

1516
class AIProjectClient(AIProjectClientGenerated): # pylint: disable=too-many-instance-attributes
@@ -54,7 +55,10 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCre
5455
self.assistants = AssistantsOperations(self)
5556

5657

57-
__all__: List[str] = ["AIProjectClient"] # Add all objects you want publicly available to users at this package level
58+
__all__: List[str] = [
59+
"AIProjectClient",
60+
"PromptTemplate",
61+
] # Add all objects you want publicly available to users at this package level
5862

5963

6064
def patch_sdk():
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
# pylint: disable=line-too-long,useless-suppression
2+
# ------------------------------------
3+
# Copyright (c) Microsoft Corporation.
4+
# Licensed under the MIT License.
5+
# ------------------------------------
6+
# pylint: disable=line-too-long,R,no-member
7+
"""Customize generated code here.
8+
9+
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
10+
"""
11+
12+
import traceback
13+
import sys
14+
from pathlib import Path
15+
from typing import Any, Dict, List, Optional
16+
from typing_extensions import Self
17+
18+
19+
class PromptTemplate:
    """A helper class that accepts a variety of inputs, e.g. a Prompty file or a template
    string, and renders the parsed prompt into an array of chat messages.

    The 'prompty' library is required to use this class (`pip install prompty`).
    """

    # Error message raised whenever the optional 'prompty' dependency is missing.
    _MISSING_PROMPTY_PACKAGE_MESSAGE = (
        "The 'prompty' package is required in order to use the 'PromptTemplate' class. "
        "Please install it by running 'pip install prompty'."
    )

    @classmethod
    def from_prompty(cls, file_path: str) -> "PromptTemplate":
        """Initialize a PromptTemplate object from a prompty file.

        :param file_path: The path to the prompty file, relative to the caller's source file.
        :type file_path: str
        :return: The PromptTemplate object.
        :rtype: PromptTemplate
        :raises ValueError: If no file path is provided.
        :raises ImportError: If the 'prompty' package is not installed.
        """
        if not file_path:
            raise ValueError("Please provide file_path")

        try:
            from prompty import load
        except ImportError as exc:
            raise ImportError(cls._MISSING_PROMPTY_PACKAGE_MESSAGE) from exc

        # Resolve file_path relative to the *caller's* source file, discovered via
        # traceback.extract_stack(). It's "-2" because in the stack the last frame is
        # the current function, and the second-to-last frame is the caller function,
        # whose directory is the root of file_path.
        stack = traceback.extract_stack()
        caller = Path(stack[-2].filename)
        abs_file_path = Path(caller.parent / Path(file_path)).resolve().absolute()

        prompty = load(str(abs_file_path))
        prompty.template.type = "mustache"  # For Azure, default to mustache instead of Jinja2
        return cls(prompty=prompty)

    @classmethod
    def from_string(cls, prompt_template: str, api: str = "chat", model_name: Optional[str] = None) -> "PromptTemplate":
        """Initialize a PromptTemplate object from a message template.

        :param prompt_template: The prompt template string.
        :type prompt_template: str
        :param api: The API type, e.g. "chat" or "completion".
        :type api: str
        :param model_name: The model name, e.g. "gpt-4o-mini".
        :type model_name: str
        :return: The PromptTemplate object.
        :rtype: PromptTemplate
        :raises ImportError: If the 'prompty' package is not installed.
        """
        try:
            from prompty import headless
        except ImportError as exc:
            raise ImportError(cls._MISSING_PROMPTY_PACKAGE_MESSAGE) from exc

        # Normalize indentation first so templates written as indented triple-quoted
        # strings parse the same as templates loaded from a file.
        prompt_template = cls._remove_leading_empty_space(prompt_template)
        prompty = headless(api=api, content=prompt_template)
        prompty.template.type = "mustache"  # For Azure, default to mustache instead of Jinja2
        prompty.template.parser = "prompty"
        return cls(
            api=api,
            model_name=model_name,
            prompty=prompty,
        )

    @classmethod
    def _remove_leading_empty_space(cls, multiline_str: str) -> str:
        """
        Processes a multiline string by:
        1. Removing leading empty lines
        2. Finding the minimum number of leading whitespace characters among the
           remaining non-empty lines
        3. Removing that common indentation from every remaining line

        :param multiline_str: The input multiline string.
        :type multiline_str: str
        :return: The processed multiline string.
        :rtype: str
        """
        lines = multiline_str.splitlines()

        # Skip the leading run of blank/whitespace-only lines.
        start_index = 0
        while start_index < len(lines) and lines[start_index].strip() == "":
            start_index += 1

        # Find the minimum number of leading whitespace characters. Each leading
        # character (space or tab) counts as one, so that the character-based slice
        # below never removes non-whitespace content. (A previous version also added
        # `line.lstrip().count("\t") * 2`, which counted tabs in the already-stripped
        # content and could over-trim lines containing interior tabs.)
        min_spaces = sys.maxsize
        for line in lines[start_index:]:
            if len(line.strip()) == 0:
                continue  # Blank lines don't constrain the common indentation.
            spaces = len(line) - len(line.lstrip())
            min_spaces = min(min_spaces, spaces)

        # Strip the common indentation from every remaining line.
        processed_lines = []
        for line in lines[start_index:]:
            processed_lines.append(line[min_spaces:])

        return "\n".join(processed_lines)

    def __init__(
        self,
        *,
        api: str = "chat",
        prompty: Optional["Prompty"] = None,  # type: ignore[name-defined]
        prompt_template: Optional[str] = None,
        model_name: Optional[str] = None,
    ) -> None:
        """Create a PromptTemplate object.

        :keyword api: The API type.
        :paramtype api: str
        :keyword prompty: Optional Prompty object.
        :paramtype prompty: ~prompty.Prompty or None.
        :keyword prompt_template: Optional prompt template string.
        :paramtype prompt_template: str or None.
        :keyword model_name: Optional AI Model name.
        :paramtype model_name: str or None.
        :raises ValueError: If neither `prompty` nor `prompt_template` is provided.
        """
        self.prompty = prompty
        if self.prompty is not None:
            # The Azure deployment name, when present in the prompty model
            # configuration, doubles as the model name; None otherwise.
            self.model_name = self.prompty.model.configuration.get("azure_deployment")
            self.parameters = self.prompty.model.parameters
            self._config = {}
        elif prompt_template is not None:
            self.model_name = model_name
            self.parameters = {}
            # _config is a dict to hold the internal configuration
            self._config = {
                "api": api if api is not None else "chat",
                "prompt_template": prompt_template,
            }
        else:
            raise ValueError("Please pass valid arguments for PromptTemplate")

    def create_messages(self, data: Optional[Dict[str, Any]] = None, **kwargs) -> List[Dict[str, Any]]:
        """Render the prompt template with the given data.

        Template variables may be supplied either as a single `data` dict or as
        keyword arguments (used only when `data` is None).

        :param data: The data to render the prompt template with.
        :type data: Optional[Dict[str, Any]]
        :return: The rendered prompt template.
        :rtype: List[Dict[str, Any]]
        :raises ImportError: If the 'prompty' package is not installed.
        :raises ValueError: If this instance holds no prompty object to render.
        """
        try:
            from prompty import prepare
        except ImportError as exc:
            raise ImportError(self._MISSING_PROMPTY_PACKAGE_MESSAGE) from exc

        if data is None:
            data = kwargs

        if self.prompty is not None:
            parsed = prepare(self.prompty, data)
            return parsed  # type: ignore
        else:
            raise ValueError("Please provide valid prompt template")
179+
180+
181+
def patch_sdk():
    """Do not remove from this file.

    `patch_sdk` serves as a last-resort escape hatch for customizations that
    cannot be achieved with the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """

Diff for: sdk/ai/azure-ai-projects-onedp/azure/ai/projects/onedp/operations/_patch_datasets.py

+1
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,7 @@ def upload_folder_and_create(self, *, name: str, version: str, folder: str, **kw
192192
blob_name,
193193
)
194194
with file_path.open("rb") as data: # Open the file for reading in binary mode
195+
# TODO: Is there an upload_folder?
195196
# See https://learn.microsoft.com/python/api/azure-storage-blob/azure.storage.blob.containerclient?view=azure-python#azure-storage-blob-containerclient-upload-blob
196197
container_client.upload_blob(name=str(blob_name), data=data, **kwargs)
197198
logger.debug("[upload_folder_and_create] Done uploaded.")

Diff for: sdk/ai/azure-ai-projects-onedp/azure/ai/projects/onedp/operations/_patch_inference.py

+2
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": # t
9090
) from e
9191

9292
endpoint = self._get_inference_url(self._outer_instance._config.endpoint) # pylint: disable=protected-access
93+
# TODO: Remove this before //build?
9394
# Older Inference SDK versions use ml.azure.com as the scope. Make sure to set the correct value here. This
9495
# is only relevent of course if EntraID auth is used.
9596
credential_scopes = ["https://cognitiveservices.azure.com/.default"]
@@ -243,6 +244,7 @@ def get_azure_openai_client(
243244
# use https://{resource-name}.openai.azure.com where {resource-name} is the same as the
244245
# foundry API endpoint (https://{resource-name}.services.ai.azure.com)
245246

247+
# TODO: Confirm that it's okay to do two REST API calls here.
246248
# If the connection uses API key authentication, we need to make another service call to get
247249
# the connection with API key populated.
248250
if connection.credentials.auth_type == CredentialType.API_KEY:

Diff for: sdk/ai/azure-ai-projects-onedp/dev_requirements.txt

+1
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,4 @@ aiohttp
55
azure.storage.blob
66
azure.ai.inference
77
openai
8+
prompty
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
---
2+
name: Basic Prompt
3+
description: A basic prompt that uses the GPT-3 chat API to answer questions
4+
authors:
5+
- author_1
6+
- author_2
7+
model:
8+
api: chat
9+
configuration:
10+
azure_deployment: gpt-4o-mini
11+
parameters:
12+
temperature: 1
13+
frequency_penalty: 0.5
14+
presence_penalty: 0.5
15+
---
16+
system:
17+
You are an AI assistant in a hotel. You help guests with their requests and provide information about the hotel and its services.
18+
19+
# context
20+
{{#rules}}
21+
{{rule}}
22+
{{/rules}}
23+
24+
{{#chat_history}}
25+
{{role}}:
26+
{{content}}
27+
{{/chat_history}}
28+
29+
user:
30+
{{input}}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# ------------------------------------
2+
# Copyright (c) Microsoft Corporation.
3+
# Licensed under the MIT License.
4+
# ------------------------------------
5+
6+
"""
7+
DESCRIPTION:
8+
Given an AIProjectClient, this sample demonstrates how to
9+
* Get an authenticated ChatCompletionsClient from the azure.ai.inference package
10+
* Define a Mustache template, and render the template with provided parameters to create a list of chat messages.
11+
* Perform one chat completion operation.
12+
Package azure.ai.inference required. For more information see https://pypi.org/project/azure-ai-inference/.
13+
Package prompty required. For more information see https://pypi.org/project/prompty/.
14+
15+
USAGE:
16+
sample_chat_completions_with_azure_ai_inference_client_and_prompt_string.py
17+
18+
Before running the sample:
19+
20+
pip install azure-ai-projects azure-ai-inference azure-identity prompty
21+
22+
Set these environment variables with your own values:
23+
1) PROJECT_ENDPOINT - The Azure AI Project endpoint, as found in the overview page of your
24+
Azure AI Foundry project.
25+
2) DEPLOYMENT_NAME - The AI model deployment name, as found in your AI Foundry project.
26+
"""
27+
28+
import os
29+
from azure.identity import DefaultAzureCredential
30+
from azure.ai.projects.onedp import AIProjectClient, PromptTemplate
31+
32+
endpoint = os.environ["PROJECT_ENDPOINT"]
33+
model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"]
34+
35+
with AIProjectClient(
36+
endpoint=endpoint,
37+
credential=DefaultAzureCredential(exclude_interactive_browser_credential=False),
38+
) as project_client:
39+
40+
with project_client.inference.get_chat_completions_client() as client:
41+
42+
prompt_template_str = """
43+
system:
44+
You are an AI assistant in a hotel. You help guests with their requests and provide information about the hotel and its services.
45+
46+
# context
47+
{{#rules}}
48+
{{rule}}
49+
{{/rules}}
50+
51+
{{#chat_history}}
52+
{{role}}:
53+
{{content}}
54+
{{/chat_history}}
55+
56+
user:
57+
{{input}}
58+
"""
59+
prompt_template = PromptTemplate.from_string(api="chat", prompt_template=prompt_template_str)
60+
61+
input = "When I arrived, can I still have breakfast?"
62+
rules = [
63+
{"rule": "The check-in time is 3pm"},
64+
{"rule": "The check-out time is 11am"},
65+
{"rule": "Breakfast is served from 7am to 10am"},
66+
]
67+
chat_history = [
68+
{"role": "user", "content": "I'll arrive at 2pm. What's the check-in and check-out time?"},
69+
{"role": "system", "content": "The check-in time is 3 PM, and the check-out time is 11 AM."},
70+
]
71+
messages = prompt_template.create_messages(input=input, rules=rules, chat_history=chat_history)
72+
print(messages)
73+
74+
response = client.complete(model=model_deployment_name, messages=messages)
75+
76+
print(response.choices[0].message.content)

0 commit comments

Comments
 (0)