 All links and storage that call OpenAI should use get_openai_client(opts) so
 LLM provider and proxy can be switched in one place.
 """
-from openai import OpenAI, AzureOpenAI
+from openai import OpenAI, AzureOpenAI, AsyncOpenAI, AsyncAzureOpenAI
 
 from lib.logging_utils import init_logger
 
@@ -21,11 +21,12 @@
 def get_openai_client(opts=None):
     """
     Return an OpenAI-compatible client (OpenAI or AzureOpenAI).
+    Same client is used for chat and embeddings; LiteLLM proxy supports both.
 
     opts: dict of options. All values are read from opts only.
 
     Supported keys in opts:
-    - LITELLM_PROXY_URL, LITELLM_MASTER_KEY -> use LiteLLM proxy
+    - LITELLM_PROXY_URL, LITELLM_MASTER_KEY -> use LiteLLM proxy (chat + embeddings)
     - AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_KEY, AZURE_OPENAI_API_VERSION -> Azure
     - OPENAI_API_KEY or openai_api_key or api_key -> OpenAI
     - organization / organization_key, project / project_key (optional)
@@ -83,3 +84,49 @@ def get_openai_client(opts=None): |
83 | 84 | "Set LITELLM_PROXY_URL + LITELLM_MASTER_KEY, or " |
84 | 85 | "AZURE_OPENAI_ENDPOINT + AZURE_OPENAI_API_KEY, or OPENAI_API_KEY (or api_key)" |
85 | 86 | ) |
| 87 | + |
| 88 | + |
| 89 | +def get_async_openai_client(opts=None): |
| 90 | + """ |
| 91 | + Return an async OpenAI-compatible client. Same opts semantics as get_openai_client. |
| 92 | + LiteLLM proxy is used for both chat and embeddings when configured. |
| 93 | + """ |
| 94 | + opts = opts or {} |
| 95 | + |
| 96 | + litellm_url = (opts.get("LITELLM_PROXY_URL") or "").strip().rstrip("/") |
| 97 | + litellm_key = (opts.get("LITELLM_MASTER_KEY") or "").strip() |
| 98 | + if litellm_url and litellm_key: |
| 99 | + logger.info("Using LiteLLM proxy at %s (async)", litellm_url) |
| 100 | + return AsyncOpenAI( |
| 101 | + api_key=litellm_key, |
| 102 | + base_url=litellm_url + "/v1", |
| 103 | + timeout=120.0, |
| 104 | + max_retries=0, |
| 105 | + ) |
| 106 | + |
| 107 | + azure_endpoint = (opts.get("AZURE_OPENAI_ENDPOINT") or "").strip() |
| 108 | + azure_api_key = (opts.get("AZURE_OPENAI_API_KEY") or "").strip() |
| 109 | + azure_api_version = opts.get("AZURE_OPENAI_API_VERSION") or DEFAULT_AZURE_OPENAI_API_VERSION |
| 110 | + if azure_endpoint and azure_api_key: |
| 111 | + logger.info("Using Azure OpenAI client at endpoint: %s (async)", azure_endpoint) |
| 112 | + return AsyncAzureOpenAI( |
| 113 | + api_key=azure_api_key, |
| 114 | + azure_endpoint=azure_endpoint, |
| 115 | + api_version=azure_api_version, |
| 116 | + timeout=120.0, |
| 117 | + max_retries=0, |
| 118 | + ) |
| 119 | + |
| 120 | + openai_api_key = ( |
| 121 | + opts.get("OPENAI_API_KEY") |
| 122 | + or opts.get("openai_api_key") |
| 123 | + or opts.get("api_key") |
| 124 | + ) |
| 125 | + if openai_api_key: |
| 126 | + logger.info("Using public OpenAI client (async)") |
| 127 | + return AsyncOpenAI(api_key=openai_api_key, timeout=120.0, max_retries=0) |
| 128 | + |
| 129 | + raise ValueError( |
| 130 | + "Set LITELLM_PROXY_URL + LITELLM_MASTER_KEY, or " |
| 131 | + "AZURE_OPENAI_ENDPOINT + AZURE_OPENAI_API_KEY, or OPENAI_API_KEY (or api_key)" |
| 132 | + ) |
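
A minimal usage sketch (not part of the diff), assuming the module is importable as `openai_client` and that opts is a plain dict built by the caller; the import path, API key value, and model names are illustrative only:

    import asyncio

    from openai_client import get_async_openai_client  # import path assumed

    async def main():
        # Any one of the supported credential sets works; OPENAI_API_KEY shown here.
        opts = {"OPENAI_API_KEY": "sk-..."}
        client = get_async_openai_client(opts)

        # Chat completion through the same client.
        resp = await client.chat.completions.create(
            model="gpt-4o-mini",  # illustrative model name
            messages=[{"role": "user", "content": "ping"}],
        )
        print(resp.choices[0].message.content)

        # Embeddings go through the same client as well.
        emb = await client.embeddings.create(
            model="text-embedding-3-small",  # illustrative model name
            input=["ping"],
        )
        print(len(emb.data[0].embedding))

    asyncio.run(main())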