Skip to content

Commit 03bc0c3

Browse files
committed
import without individual providers
1 parent 94b81f7 commit 03bc0c3

File tree

10 files changed

+146
-90
lines changed

10 files changed

+146
-90
lines changed

.github/workflows/test.yml

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,18 @@ jobs:
2323
sudo apt-get update
2424
sudo apt-get install -y tesseract-ocr
2525
26-
- name: Install dependencies
26+
- name: Install core package only (no providers)
2727
run: |
2828
python -m pip install --upgrade pip
29-
pip install -e ".[dev]"
29+
pip install -e .
30+
31+
- name: Verify import without provider SDKs
32+
run: |
33+
python -c "from covenance import ask_llm, Covenance, print_usage; print('Import OK')"
34+
35+
- name: Install all dependencies
36+
run: |
37+
pip install -e ".[all,dev]"
3038
3139
- name: Run pytest
3240
run: pytest --cov --cov-branch --cov-report=xml
@@ -36,4 +44,4 @@ jobs:
3644
- name: Upload coverage reports to Codecov
3745
uses: codecov/codecov-action@v5
3846
with:
39-
token: ${{ secrets.CODECOV_TOKEN }}
47+
token: ${{ secrets.CODECOV_TOKEN }}

covenance/client.py

Lines changed: 22 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -12,16 +12,10 @@
1212
from pathlib import Path
1313
from typing import Any
1414

15-
from anthropic import Anthropic
16-
from google import genai
17-
from mistralai import Mistral
18-
from openai import OpenAI
1915
from pydantic import TypeAdapter, ValidationError
2016

2117
from ._caller_context import capture_caller_context
2218
from ._lazy_client import LazyClient
23-
from .clients.grok_client import GROK_BASE_URL
24-
from .clients.openrouter_client import OPENROUTER_BASE_URL
2519
from .exceptions import StructuredOutputParsingError
2620
from .keys import (
2721
get_anthropic_api_key,
@@ -105,33 +99,49 @@ def _require_key(
10599
) -> str:
106100
return require_api_key(override or getter(), provider)
107101

108-
def _create_openai_client(self) -> OpenAI:
102+
def _create_openai_client(self):
103+
from openai import OpenAI
104+
109105
api_key = self._require_key(self._openai_api_key, "openai", get_openai_api_key)
110106
return OpenAI(api_key=api_key)
111107

112-
def _create_openrouter_client(self) -> OpenAI:
108+
def _create_openrouter_client(self):
109+
from openai import OpenAI
110+
111+
from .clients.openrouter_client import OPENROUTER_BASE_URL
112+
113113
api_key = self._require_key(
114114
self._openrouter_api_key, "openrouter", get_openrouter_api_key
115115
)
116116
return OpenAI(api_key=api_key, base_url=OPENROUTER_BASE_URL)
117117

118-
def _create_gemini_client(self) -> genai.Client:
118+
def _create_gemini_client(self):
119+
from google import genai
120+
119121
api_key = self._require_key(self._gemini_api_key, "gemini", get_gemini_api_key)
120122
return genai.Client(api_key=api_key)
121123

122-
def _create_mistral_client(self) -> Mistral:
124+
def _create_mistral_client(self):
125+
from mistralai import Mistral
126+
123127
api_key = self._require_key(
124128
self._mistral_api_key, "mistral", get_mistral_api_key
125129
)
126130
return Mistral(api_key=api_key)
127131

128-
def _create_anthropic_client(self) -> Anthropic:
132+
def _create_anthropic_client(self):
133+
from anthropic import Anthropic
134+
129135
api_key = self._require_key(
130136
self._anthropic_api_key, "anthropic", get_anthropic_api_key
131137
)
132138
return Anthropic(api_key=api_key)
133139

134-
def _create_grok_client(self) -> OpenAI:
140+
def _create_grok_client(self):
141+
from openai import OpenAI
142+
143+
from .clients.grok_client import GROK_BASE_URL
144+
135145
api_key = self._require_key(self._grok_api_key, "grok", get_grok_api_key)
136146
return OpenAI(api_key=api_key, base_url=GROK_BASE_URL)
137147

covenance/clients/anthropic_client.py

Lines changed: 11 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,23 +4,27 @@
44
from datetime import UTC, datetime
55
from typing import TYPE_CHECKING, TypeVar
66

7-
from anthropic import Anthropic, APIError, RateLimitError
87
from pydantic import BaseModel
98

109
from covenance._lazy_client import LazyClient
11-
from covenance.exceptions import StructuredOutputParsingError
10+
from covenance.exceptions import StructuredOutputParsingError, require_provider
1211
from covenance.keys import get_anthropic_api_key, require_api_key
1312
from covenance.models import ClaudeModels
1413
from covenance.record import TokenUsage
1514
from covenance.retry import exponential_backoff
1615

1716
if TYPE_CHECKING:
17+
from anthropic import Anthropic
18+
1819
from covenance.record import RecordStore
1920

2021
T = TypeVar("T")
2122

2223

23-
def _create_anthropic_client() -> Anthropic:
24+
def _create_anthropic_client() -> "Anthropic":
25+
require_provider("anthropic")
26+
from anthropic import Anthropic
27+
2428
api_key = require_api_key(get_anthropic_api_key(), "anthropic")
2529
return Anthropic(api_key=api_key)
2630

@@ -95,7 +99,7 @@ def ask_anthropic[T](
9599
sys_msg: str | None = None,
96100
model: str = ClaudeModels.haiku45,
97101
*,
98-
client_override: Anthropic | None = None,
102+
client_override: "Anthropic | None" = None,
99103
record_store: "RecordStore | None" = None,
100104
temperature: float | None = None,
101105
) -> T:
@@ -105,22 +109,11 @@ def ask_anthropic[T](
105109
to get structured output. Retries up to 100 times when encountering rate limit errors.
106110
107111
If response_type is str, performs a standard chat completion and returns the text.
108-
109-
Args:
110-
user_msg: User message/prompt
111-
response_type: Pydantic model class for structured output, or str/None for plain text
112-
sys_msg: Optional system message/instructions
113-
model: Claude model identifier (defaults to claude-3-5-haiku)
114-
115-
Returns:
116-
Parsed Pydantic object of type T, or str if response_type is str
117-
118-
Raises:
119-
StructuredOutputParsingError: If parsing fails
120-
Exception: After max_attempts retries on rate limit errors
121112
"""
113+
from anthropic import APIError, RateLimitError
114+
122115
max_attempts = 100
123-
api_client = client_override or client
116+
api_client = client_override or client # type: ignore[assignment]
124117

125118
# Handle plain text output
126119
is_plain_text = response_type is str or response_type is None

covenance/clients/google_client.py

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,15 @@
44
from datetime import UTC, datetime
55
from typing import TYPE_CHECKING, TypeVar
66

7-
from google import genai # pip install --upgrade google-genai
8-
from google.genai.errors import ClientError
9-
107
from covenance._lazy_client import LazyClient
11-
from covenance.exceptions import StructuredOutputParsingError
8+
from covenance.exceptions import StructuredOutputParsingError, require_provider
129
from covenance.keys import get_gemini_api_key, require_api_key
1310
from covenance.models import GeminiModels
1411
from covenance.record import TokenUsage
1512

1613
if TYPE_CHECKING:
14+
from google import genai
15+
1716
from covenance.record import RecordStore
1817

1918
# Suppress warning about non-text parts (thought_signature) in Gemini responses.
@@ -27,7 +26,10 @@
2726
T = TypeVar("T")
2827

2928

30-
def _create_gemini_client() -> genai.Client:
29+
def _create_gemini_client() -> "genai.Client":
30+
require_provider("google")
31+
from google import genai
32+
3133
api_key = require_api_key(get_gemini_api_key(), "gemini")
3234
return genai.Client(api_key=api_key)
3335

@@ -38,7 +40,7 @@ def _create_gemini_client() -> genai.Client:
3840
VERBOSE = False
3941

4042

41-
def _parse_wait_time_from_error(error: ClientError) -> float:
43+
def _parse_wait_time_from_error(error: Exception) -> float:
4244
"""Parse wait time from Gemini ClientError message.
4345
4446
The error message typically contains: "Please retry in X.XXXs."
@@ -99,7 +101,7 @@ def ask_gemini[T](
99101
sys_msg: str | None = None,
100102
model: str = GeminiModels.flash_25.value,
101103
*,
102-
client_override: genai.Client | None = None,
104+
client_override: "genai.Client | None" = None,
103105
record_store: "RecordStore | None" = None,
104106
temperature: float | None = None,
105107
) -> T:
@@ -109,15 +111,11 @@ def ask_gemini[T](
109111
parsing the wait time from the error message and waiting accordingly.
110112
111113
If response_type is str or None, performs a standard chat completion and returns the text.
112-
113-
`response_type` can be:
114-
- a Pydantic model -> returns a model instance
115-
- list[MyModel] -> returns the annotated container
116-
- a builtin typing annotation -> returns that type
117-
- str -> returns plain text
118114
"""
115+
from google.genai.errors import ClientError
116+
119117
max_attempts = 100
120-
api_client = client_override or client
118+
api_client = client_override or client # type: ignore[assignment]
121119
total_tpm_wait = 0.0 # Accumulate TPM retry wait time
122120
started_at = datetime.now(UTC) # Record absolute start time
123121

covenance/clients/grok_client.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,20 +2,24 @@
22

33
from typing import TYPE_CHECKING
44

5-
from openai import OpenAI
6-
75
from covenance._lazy_client import LazyClient
86
from covenance.clients.openai_client import ask_openai_compatible_structured
7+
from covenance.exceptions import require_provider
98
from covenance.keys import get_grok_api_key, require_api_key
109
from covenance.models import GrokModels
1110

1211
if TYPE_CHECKING:
12+
from openai import OpenAI
13+
1314
from covenance.record import RecordStore
1415

1516
GROK_BASE_URL = "https://api.x.ai/v1"
1617

1718

18-
def _create_grok_client() -> OpenAI:
19+
def _create_grok_client() -> "OpenAI":
20+
require_provider("openai")
21+
from openai import OpenAI
22+
1923
api_key = require_api_key(get_grok_api_key(), "grok")
2024
return OpenAI(api_key=api_key, base_url=GROK_BASE_URL)
2125

@@ -29,12 +33,12 @@ def ask_grok[T](
2933
sys_msg: str | None = None,
3034
model: str = GrokModels.grok4_fast.value,
3135
*,
32-
client_override: OpenAI | None = None,
36+
client_override: "OpenAI | None" = None,
3337
record_store: "RecordStore | None" = None,
3438
temperature: float | None = None,
3539
) -> T:
3640
"""Call xAI Grok API with automatic retry."""
37-
api_client = client_override or client
41+
api_client = client_override or client # type: ignore[assignment]
3842
return ask_openai_compatible_structured(
3943
client=api_client,
4044
user_msg=user_msg,

covenance/clients/mistral_client.py

Lines changed: 11 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -4,23 +4,25 @@
44
from datetime import UTC, datetime
55
from typing import TYPE_CHECKING, TypeVar
66

7-
from mistralai import Mistral
8-
from mistralai.models import HTTPValidationError, SDKError
9-
107
from covenance._lazy_client import LazyClient
11-
from covenance.exceptions import StructuredOutputParsingError
8+
from covenance.exceptions import StructuredOutputParsingError, require_provider
129
from covenance.keys import get_mistral_api_key, require_api_key
1310
from covenance.models import MistralModels
1411
from covenance.record import TokenUsage
1512
from covenance.retry import exponential_backoff
1613

1714
if TYPE_CHECKING:
15+
from mistralai import Mistral
16+
1817
from covenance.record import RecordStore
1918

2019
T = TypeVar("T")
2120

2221

23-
def _create_mistral_client() -> Mistral:
22+
def _create_mistral_client() -> "Mistral":
23+
require_provider("mistral")
24+
from mistralai import Mistral
25+
2426
api_key = require_api_key(get_mistral_api_key(), "mistral")
2527
return Mistral(api_key=api_key)
2628

@@ -76,7 +78,7 @@ def ask_mistral[T](
7678
sys_msg: str | None = None,
7779
model: str = MistralModels.small.value,
7880
*,
79-
client_override: Mistral | None = None,
81+
client_override: "Mistral | None" = None,
8082
record_store: "RecordStore | None" = None,
8183
temperature: float | None = None,
8284
) -> T:
@@ -86,21 +88,11 @@ def ask_mistral[T](
8688
output directly. Retries up to 100 times when encountering rate limit errors.
8789
8890
If response_type is str or None, performs a standard chat completion and returns the text.
89-
90-
Args:
91-
user_msg: User message/prompt
92-
response_type: Pydantic model class for structured output, or str/None for plain text
93-
sys_msg: Optional system message/instructions
94-
model: Mistral model identifier (defaults to mistral-small-latest)
95-
96-
Returns:
97-
Parsed Pydantic object of type T, or str if response_type is str
98-
99-
Raises:
100-
Exception: After max_attempts retries on rate limit errors
10191
"""
92+
from mistralai.models import HTTPValidationError, SDKError
93+
10294
max_attempts = 100
103-
api_client = client_override or client
95+
api_client = client_override or client # type: ignore[assignment]
10496

10597
# Build messages array
10698
messages = []

0 commit comments

Comments (0)