forked from cadence-workflow/cadence-python-client
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path openai_activities.py
More file actions
51 lines (46 loc) · 1.68 KB
/
openai_activities.py
File metadata and controls
51 lines (46 loc) · 1.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
from agents import Model, OpenAIProvider
from openai import AsyncOpenAI
from openai.types.responses import ResponsePromptParam
from cadence import activity
from agents import (
TResponseInputItem,
ModelSettings,
AgentOutputSchemaBase,
ModelTracing,
ModelResponse,
)
from cadence.contrib.openai.cadence_tool import CadenceTool, from_cadence_tool
from cadence.contrib.openai.cadence_handoff import CadenceHandoff, from_cadence_handoff
class OpenAIActivities:
    """Cadence activity wrapper around the OpenAI Agents SDK model provider.

    Exposes a single model-invocation activity so that agent model calls run
    outside workflow code, where the Cadence service can schedule and retry
    them.
    """

    def __init__(self):
        # max_retries=0 disables client-side retries; retry behavior is
        # presumably governed by the Cadence activity options instead —
        # NOTE(review): confirm against worker configuration.
        self._openai_provider: OpenAIProvider = OpenAIProvider(
            openai_client=AsyncOpenAI(max_retries=0)
        )

    @activity.method
    async def invoke_model(
        self,
        model_name: str,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[CadenceTool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[CadenceHandoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        conversation_id: str | None,
        prompt: ResponsePromptParam | None,
    ) -> ModelResponse:
        """Resolve ``model_name`` via the provider and forward one model call.

        The Cadence-serializable ``CadenceTool``/``CadenceHandoff`` wrappers
        are converted back into Agents SDK objects before delegation; every
        other argument is passed through to ``Model.get_response`` unchanged.
        """
        resolved_model: Model = self._openai_provider.get_model(model_name)
        sdk_tools = [from_cadence_tool(t) for t in tools]
        sdk_handoffs = [from_cadence_handoff(item) for item in handoffs]
        return await resolved_model.get_response(
            system_instructions=system_instructions,
            input=input,
            model_settings=model_settings,
            tools=sdk_tools,
            output_schema=output_schema,
            handoffs=sdk_handoffs,
            tracing=tracing,
            previous_response_id=previous_response_id,
            conversation_id=conversation_id,
            prompt=prompt,
        )