Skip to content

Feature/provider base url #1083

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions deploy/docker/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -669,6 +669,8 @@ llm:
provider: "openai/gpt-4o-mini"
api_key_env: "OPENAI_API_KEY"
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
base_url_env: "LLM_BASE_URL" # Name of the environment variable that holds the base URL
# base_url: https://api.openai.com/v1 # If you pass the base URL directly then base_url_env will be ignored

# Redis Configuration (Used by internal Redis server managed by supervisord)
redis:
Expand Down
49 changes: 33 additions & 16 deletions deploy/docker/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,8 @@ async def handle_llm_qa(
response = perform_completion_with_backoff(
provider=config["llm"]["provider"],
prompt_with_variables=prompt,
api_token=os.environ.get(config["llm"].get("api_key_env", ""))
api_token=llm_api_key(config),
base_url=llm_base_url(config)
)

return response.choices[0].message.content
Expand All @@ -111,17 +112,8 @@ async def process_llm_extraction(
) -> None:
"""Process LLM extraction in background."""
try:
# If config['llm'] has api_key then ignore the api_key_env
api_key = ""
if "api_key" in config["llm"]:
api_key = config["llm"]["api_key"]
else:
api_key = os.environ.get(config["llm"].get("api_key_env", None), "")
llm_strategy = LLMExtractionStrategy(
llm_config=LLMConfig(
provider=config["llm"]["provider"],
api_token=api_key
),
llm_config=llm_config(config),
instruction=instruction,
schema=json.loads(schema) if schema else None,
)
Expand Down Expand Up @@ -181,10 +173,7 @@ async def handle_markdown_request(
FilterType.FIT: PruningContentFilter(),
FilterType.BM25: BM25ContentFilter(user_query=query or ""),
FilterType.LLM: LLMContentFilter(
llm_config=LLMConfig(
provider=config["llm"]["provider"],
api_token=os.environ.get(config["llm"].get("api_key_env", None), ""),
),
llm_config=llm_config(config),
instruction=query or "Extract main content"
)
}[filter_type]
Expand Down Expand Up @@ -520,4 +509,32 @@ async def handle_stream_crawl_request(
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e)
)
)

def llm_config(config: dict) -> LLMConfig:
    """Build an LLMConfig from the app config's ``llm`` section.

    The provider is taken verbatim from the config; the API token and
    base URL are resolved through llm_api_key/llm_base_url, which prefer
    a directly-configured value over the ``*_env`` environment-variable
    indirection.
    """
    provider = config["llm"]["provider"]
    token = llm_api_key(config)
    url = llm_base_url(config)
    return LLMConfig(provider=provider, api_token=token, base_url=url)

def llm_api_key(config) -> str:
    """Return the LLM API key from config, falling back to the environment.

    A directly-configured ``api_key`` takes precedence over the
    ``api_key_env`` indirection. Returns ``""`` when neither is set.

    Args:
        config: Application config dict with an ``llm`` section.

    Returns:
        The API key string, or ``""`` if none is configured.
    """
    llm_cfg = config["llm"]
    # If config['llm'] has api_key then ignore the api_key_env
    if "api_key" in llm_cfg:
        return llm_cfg["api_key"]
    # Guard against a missing api_key_env: os.environ.get(None) raises
    # TypeError (environment keys must be str), so only consult the
    # environment when a variable name is actually configured.
    env_name = llm_cfg.get("api_key_env")
    if env_name:
        return os.environ.get(env_name, "")
    return ""

def llm_base_url(config) -> str:
    """Return the LLM base URL from config, falling back to the environment.

    A directly-configured ``base_url`` takes precedence over the
    ``base_url_env`` indirection. Returns ``""`` when neither is set.

    Args:
        config: Application config dict with an ``llm`` section.

    Returns:
        The base URL string, or ``""`` if none is configured.
    """
    llm_cfg = config["llm"]
    # If config['llm'] has base_url then ignore the base_url_env
    if "base_url" in llm_cfg:
        return llm_cfg["base_url"]
    # Guard against a missing base_url_env: os.environ.get(None) raises
    # TypeError (environment keys must be str), so only consult the
    # environment when a variable name is actually configured.
    env_name = llm_cfg.get("base_url_env")
    if env_name:
        return os.environ.get(env_name, "")
    return ""
2 changes: 2 additions & 0 deletions deploy/docker/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ llm:
provider: "openai/gpt-4o-mini"
api_key_env: "OPENAI_API_KEY"
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
base_url_env: ""
# base_url: https://api.openai.com/v1 # If you pass the base URL directly then base_url_env will be ignored

# Redis Configuration
redis:
Expand Down
4 changes: 3 additions & 1 deletion docs/md_v2/core/docker-deployment.md
Original file line number Diff line number Diff line change
Expand Up @@ -669,7 +669,9 @@ llm:
provider: "openai/gpt-4o-mini"
api_key_env: "OPENAI_API_KEY"
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored

base_url_env: "LLM_BASE_URL" # Name of the environment variable that holds the base URL
# base_url: https://api.openai.com/v1 # If you pass the base URL directly then base_url_env will be ignored

# Redis Configuration (Used by internal Redis server managed by supervisord)
redis:
host: "localhost"
Expand Down