"""Switch LLM profiles with the built-in switch_llm tool.

This example creates two temporary LLM profiles, starts the conversation on a
GPT profile, asks the agent to call the switch_llm tool, and then verifies that
future model calls use the Claude profile.

Usage:
    LLM_API_KEY=... LLM_BASE_URL=https://llm-proxy.app.all-hands.dev \
        uv run python examples/01_standalone_sdk/49_switch_llm_tool.py
"""

import os

from pydantic import SecretStr

from openhands.sdk import LLM, Agent, LocalConversation
from openhands.sdk.llm.llm_profile_store import LLMProfileStore


# Names for the two temporary profiles and the models they resolve to.
GPT_PROFILE = "example-gpt55"
CLAUDE_PROFILE = "example-claude"
DEFAULT_BASE_URL = "https://llm-proxy.app.all-hands.dev"
GPT_MODEL = "openai/gpt-5.5"
CLAUDE_MODEL = "openai/prod/claude-sonnet-4-5-20250929"

# Fail fast with an explicit exception rather than `assert`, which is
# silently stripped when Python runs with -O.
api_key = os.getenv("LLM_API_KEY")
if api_key is None:
    raise RuntimeError("LLM_API_KEY environment variable is not set.")
base_url = os.getenv("LLM_BASE_URL", DEFAULT_BASE_URL)
| 29 | + |
# Persist both temporary profiles (API key included, so the switch_llm tool
# can load them by name at runtime). The two saves differed only in their
# profile name / model / usage id, so drive them from one table instead of
# duplicating the LLM construction.
store = LLMProfileStore()
for profile_name, model, usage_id in (
    (GPT_PROFILE, GPT_MODEL, "gpt55"),
    (CLAUDE_PROFILE, CLAUDE_MODEL, "claude"),
):
    store.save(
        profile_name,
        LLM(
            model=model,
            api_key=SecretStr(api_key),
            base_url=base_url,
            usage_id=usage_id,
        ),
        include_secrets=True,
    )
| 51 | + |
try:
    # Start the conversation on the GPT profile; the agent only needs the
    # finish tool plus the switch_llm tool under test.
    agent = Agent(
        llm=store.load(GPT_PROFILE),
        tools=[],
        include_default_tools=["FinishTool", "SwitchLLMTool"],
    )
    conversation = LocalConversation(agent=agent, workspace=os.getcwd())

    print(f"Starting model: {conversation.agent.llm.model}")

    # Ask the agent to invoke switch_llm and report the new active model.
    prompt = (
        f"Call the switch_llm tool now with profile_name={CLAUDE_PROFILE!r}. "
        "After the tool succeeds, answer in one short sentence naming the "
        "active model value from the tool observation exactly."
    )
    conversation.send_message(prompt)
    conversation.run()

    # The tool call should have swapped the agent's LLM to the Claude profile.
    active_model = conversation.agent.llm.model
    print(f"Active model after tool switch: {active_model}")
    assert active_model == CLAUDE_MODEL

    # Report per-usage spend followed by the combined total.
    stats = conversation.state.stats
    for usage_id, metrics in stats.usage_to_metrics.items():
        print(f" [{usage_id}] cost=${metrics.accumulated_cost:.6f}")
    combined = stats.get_combined_metrics()
    print(f"Total cost: ${combined.accumulated_cost:.6f}")
    print(f"EXAMPLE_COST: {combined.accumulated_cost}")
finally:
    # Always clean up the temporary profiles, even when the run fails.
    store.delete(GPT_PROFILE)
    store.delete(CLAUDE_PROFILE)