-from autogen import UserProxyAgent, ConversableAgent
-
-local_llm_config = {
-    "config_list": [
-        {
-            "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",  # Same as in vLLM command
-            "api_key": "NotRequired",  # Not needed
-            "base_url": "http://localhost:8000/v1",  # Your vLLM URL, with '/v1' added
-        }
-    ],
-    "cache_seed": None,  # Turns off caching, useful for testing different models
-}
-
-# Create the agent that uses the LLM.
-assistant = ConversableAgent("agent", llm_config=local_llm_config, system_message="")
-
-# Create the agent that represents the user in the conversation.
-user_proxy = UserProxyAgent("user", code_execution_config=False, system_message="")
-
-# Let the assistant start the conversation. It will end when the user types exit.
-assistant.initiate_chat(user_proxy, message="How can I help you today?")
+from openai import OpenAI
+import json
+
+# Point the OpenAI client at the local vLLM server; the API key is a
+# placeholder, since vLLM does not check it unless one is configured.
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+
+def get_weather(location: str, unit: str):
+    return f"Getting the weather for {location} in {unit}..."
+
+
+# Map tool names to the local functions that implement them.
+tool_functions = {"get_weather": get_weather}
+
+# JSON schema describing the tool, so the model knows when and how to call it.
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "City and state, e.g., 'San Francisco, CA'",
+                    },
+                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                },
+                "required": ["location", "unit"],
+            },
+        },
+    }
+]
+
+# Ask a question that should trigger the tool; "auto" lets the model decide.
+response = client.chat.completions.create(
+    model=client.models.list().data[0].id,  # first (typically only) served model
+    messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}],
+    tools=tools,
+    tool_choice="auto",
+)
+
+# Read the tool call the model produced and run the matching local function.
+tool_call = response.choices[0].message.tool_calls[0].function
+print(f"Function called: {tool_call.name}")
+print(f"Arguments: {tool_call.arguments}")
+print(f"Result: {tool_functions[tool_call.name](**json.loads(tool_call.arguments))}")
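
For completeness, the example above stops after executing the tool locally. A minimal sketch of the follow-up round trip (not part of the change above), assuming the added example has just run and reusing its client, tools, tool_functions, response, and tool_call, would send the tool result back as a "tool" message so the model can compose a final natural-language answer:

messages = [{"role": "user", "content": "What's the weather like in San Francisco?"}]
assistant_msg = response.choices[0].message
messages.append(assistant_msg)  # the assistant turn that requested the tool call
messages.append(
    {
        "role": "tool",
        "tool_call_id": assistant_msg.tool_calls[0].id,  # link result to the call
        "content": tool_functions[tool_call.name](**json.loads(tool_call.arguments)),
    }
)
followup = client.chat.completions.create(
    model=client.models.list().data[0].id,
    messages=messages,
    tools=tools,
)
print(followup.choices[0].message.content)

This second request is the standard OpenAI-compatible pattern for closing the tool-calling loop; how well the final answer reads depends on the served model's tool-calling support in vLLM.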