
Commit 59e9ab8

chore: enforce dotenv override and warn on key conflict (google#282)
Enforces dotenv override in entry points and warns on conflicting API keys.
1 parent 5c780e4 commit 59e9ab8
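
For context: dotenv.load_dotenv(override=True) makes values from a .env file take precedence over environment variables that are already exported, whereas the default load_dotenv() leaves pre-set variables untouched. A minimal sketch of the difference, assuming a .env file in the working directory containing the hypothetical entry GEMINI_API_KEY=key-from-dotenv:

# Sketch only; the shell value and the .env entry are made up.
import os

import dotenv

os.environ["GEMINI_API_KEY"] = "key-from-shell"  # simulate a variable already exported

dotenv.load_dotenv()               # default: the existing value survives -> "key-from-shell"
dotenv.load_dotenv(override=True)  # override: the .env value wins        -> "key-from-dotenv"

print(os.environ["GEMINI_API_KEY"])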

5 files changed: 29 additions, 6 deletions

benchmarks/benchmark.py

Lines changed: 3 additions & 0 deletions
@@ -45,6 +45,8 @@
 from typing import Any
 import urllib.error
 
+import dotenv
+
 from benchmarks import config
 from benchmarks import plotting
 from benchmarks import utils
@@ -55,6 +57,7 @@
 import langextract.io as lio
 
 # Load API key from environment
+dotenv.load_dotenv(override=True)
 GEMINI_API_KEY = os.environ.get(
     "GEMINI_API_KEY", os.environ.get("LANGEXTRACT_API_KEY")
 )

examples/custom_provider_plugin/test_example_provider.py

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,7 @@
 
 import os
 
+import dotenv
 # Import the provider to trigger registration with LangExtract
 # Note: This manual import is only needed when running without installation.
 # After `pip install -e .`, the entry point system handles this automatically.
@@ -27,6 +28,7 @@
 
 def main():
   """Test the custom provider."""
+  dotenv.load_dotenv(override=True)
   api_key = os.getenv("GEMINI_API_KEY") or os.getenv("LANGEXTRACT_API_KEY")
 
   if not api_key:

examples/ollama/demo_ollama.py

Lines changed: 4 additions & 0 deletions
@@ -52,9 +52,13 @@
 import urllib.error
 import urllib.request
 
+import dotenv
+
 import langextract as lx
 from langextract.providers import ollama
 
+dotenv.load_dotenv(override=True)
+
 DEFAULT_MODEL = "gemma2:2b"
 DEFAULT_OLLAMA_URL = os.environ.get("OLLAMA_HOST", "http://localhost:11434")
 OUTPUT_DIR = "test_output"
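
To illustrate why the override matters here, the demo's DEFAULT_OLLAMA_URL now follows whatever the .env file says even if a stale OLLAMA_HOST is already exported in the shell. A sketch under the assumption of a hypothetical .env entry OLLAMA_HOST=http://gpu-box:11434:

# Illustration only; the host URL and the .env file are assumptions.
import os

import dotenv

dotenv.load_dotenv(override=True)  # the .env value wins over a previously exported OLLAMA_HOST

DEFAULT_OLLAMA_URL = os.environ.get("OLLAMA_HOST", "http://localhost:11434")
print(DEFAULT_OLLAMA_URL)  # http://gpu-box:11434 if .env defines OLLAMA_HOST, else the fallback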

langextract/factory.py

Lines changed: 19 additions & 5 deletions
@@ -24,6 +24,7 @@
 import dataclasses
 import os
 import typing
+import warnings
 
 from langextract import providers
 from langextract.core import base_model
@@ -72,11 +73,23 @@ def _kwargs_with_environment_defaults(
 
   for provider_prefix, env_vars in env_vars_by_provider.items():
     if provider_prefix in model_lower:
+      found_keys = []
       for env_var in env_vars:
-        api_key = os.getenv(env_var)
-        if api_key:
-          resolved["api_key"] = api_key
-          break
+        key_val = os.getenv(env_var)
+        if key_val:
+          found_keys.append((env_var, key_val))
+
+      if found_keys:
+        resolved["api_key"] = found_keys[0][1]
+
+        if len(found_keys) > 1:
+          keys_list = ", ".join(k[0] for k in found_keys)
+          warnings.warn(
+              f"Multiple API keys detected in environment: {keys_list}. "
+              f"Using {found_keys[0][0]} and ignoring others.",
+              UserWarning,
+              stacklevel=3,
+          )
       break
 
   if "ollama" in model_id.lower() and "base_url" not in resolved:
@@ -126,7 +139,6 @@ def create_model(
   if not config.model_id and not config.provider:
     raise ValueError("Either model_id or provider must be specified")
 
-  # Load providers before any resolution
   providers.load_builtins_once()
   providers.load_plugins_once()
 
@@ -144,6 +156,8 @@
 
   model_id = config.model_id
 
+  model_id = config.model_id
+
   kwargs = _kwargs_with_environment_defaults(
       model_id or config.provider or "", config.provider_kwargs
   )
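
A standalone sketch of the conflict handling added above; it mirrors the pattern in the diff rather than calling langextract's private helper, and the two variable names are the ones the other files in this commit fall back between. The key values are placeholders.

import os
import warnings

# Simulate both keys being present (placeholder values).
os.environ["GEMINI_API_KEY"] = "key-a"
os.environ["LANGEXTRACT_API_KEY"] = "key-b"

env_vars = ("GEMINI_API_KEY", "LANGEXTRACT_API_KEY")
found_keys = [(var, os.getenv(var)) for var in env_vars if os.getenv(var)]

resolved = {}
if found_keys:
  resolved["api_key"] = found_keys[0][1]  # first match wins, as in the diff
  if len(found_keys) > 1:
    keys_list = ", ".join(k[0] for k in found_keys)
    warnings.warn(
        f"Multiple API keys detected in environment: {keys_list}. "
        f"Using {found_keys[0][0]} and ignoring others.",
        UserWarning,
    )

With both variables set, this emits a UserWarning naming GEMINI_API_KEY as the key actually used and notes that LANGEXTRACT_API_KEY is ignored.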

tests/test_live_api.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@
 import langextract as lx
 from langextract.providers import gemini_batch as gb
 
-dotenv.load_dotenv()
+dotenv.load_dotenv(override=True)
 
 DEFAULT_GEMINI_MODEL = "gemini-2.5-flash"
 DEFAULT_OPENAI_MODEL = "gpt-4o"
