
Commit 0c600bd

uv format, bug fixes, removed unneeded test
1 parent 0b03d69 commit 0c600bd

File tree

8 files changed: +246 -247 lines


tacho/ai.py

Lines changed: 9 additions & 7 deletions

@@ -24,13 +24,15 @@ async def ping_model(model: str, console) -> bool:
 async def bench_model(model: str, max_tokens: int) -> tuple[float, int]:
     """Measure inference time for a single run and return time and tokens"""
     start_time = time.time()
-    response = await llm(model, BENCHMARK_PROMPT, max_tokens)
+    res = await llm(model, BENCHMARK_PROMPT, max_tokens)
     duration = time.time() - start_time
 
-    tokens = response.usage.completion_tokens
-    if hasattr(response.usage, 'completion_tokens_details') and response.usage.completion_tokens_details:
-        if hasattr(response.usage.completion_tokens_details, 'reasoning_tokens'):
-            tokens += response.usage.completion_tokens_details.reasoning_tokens
+    tokens = res.usage.completion_tokens
+    if (
+        hasattr(res.usage, "completion_tokens_details")
+        and res.usage.completion_tokens_details
+    ):
+        if hasattr(res.usage.completion_tokens_details, "reasoning_tokens"):
+            tokens += res.usage.completion_tokens_details.reasoning_tokens
+
     return duration, tokens
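
The hunk above renames response to res and reflows the reasoning-token guard; the accounting itself is unchanged: completion tokens, plus reasoning tokens when the provider reports them under usage.completion_tokens_details. A minimal standalone sketch of that accounting, with SimpleNamespace stand-ins for the litellm response shape (count_tokens is an illustrative helper, not part of tacho):

from types import SimpleNamespace


def count_tokens(res) -> int:
    """Mirror the accounting in bench_model: completion tokens plus
    reasoning tokens, when the provider reports them."""
    tokens = res.usage.completion_tokens
    if (
        hasattr(res.usage, "completion_tokens_details")
        and res.usage.completion_tokens_details
    ):
        if hasattr(res.usage.completion_tokens_details, "reasoning_tokens"):
            tokens += res.usage.completion_tokens_details.reasoning_tokens
    return tokens


# A reasoning model: 50 visible completion tokens plus 200 reasoning tokens.
reasoning_usage = SimpleNamespace(
    completion_tokens=50,
    completion_tokens_details=SimpleNamespace(reasoning_tokens=200),
)
# A conventional model: completion_tokens_details is not populated.
plain_usage = SimpleNamespace(completion_tokens=100, completion_tokens_details=None)

assert count_tokens(SimpleNamespace(usage=reasoning_usage)) == 250
assert count_tokens(SimpleNamespace(usage=plain_usage)) == 100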

tacho/config.py

Lines changed: 11 additions & 8 deletions

@@ -16,10 +16,10 @@ def ensure_env_file():
     """Create .env file with helpful comments if it doesn't exist."""
     env_path = get_env_path()
     env_dir = env_path.parent
-
+
     # Create directory if needed
     env_dir.mkdir(exist_ok=True)
-
+
     # Create .env file with comments if it doesn't exist
     if not env_path.exists():
         template = """# Tacho Configuration File
@@ -42,16 +42,19 @@ def ensure_env_file():
 
 """
         env_path.write_text(template)
-
+
         # Set restrictive permissions on Unix-like systems
-        if os.name != 'nt':  # Not Windows
+        if os.name != "nt":  # Not Windows
             os.chmod(env_path, 0o600)
-
+
         # Notify user about the created file
         from rich.console import Console
+
         console = Console()
         console.print(f"\n[yellow]Created config file at {env_path}[/yellow]")
-        console.print("Add your API keys to this file to avoid exporting them each time.\n")
+        console.print(
+            "Add your API keys to this file to avoid exporting them each time.\n"
+        )
 
 
 def configure_logging():
@@ -67,6 +70,6 @@ def load_env():
     ensure_env_file()
     env_path = get_env_path()
     load_dotenv(env_path)
-
+
     # Also configure logging when loading env
-    configure_logging()
+    configure_logging()
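
Beyond the quote-style and whitespace cleanup, the behavior in this file is: create the config directory if needed, write a commented template on first run, and restrict the file to the owner on POSIX systems. A minimal sketch of that create-then-restrict pattern, assuming a home-directory location and a placeholder template (tacho resolves its real path via get_env_path()):

import os
from pathlib import Path

# Placeholder location; tacho derives the real one from get_env_path().
env_path = Path.home() / ".tacho" / ".env"
env_path.parent.mkdir(exist_ok=True)

if not env_path.exists():
    # Placeholder template; the real one documents the supported API keys.
    env_path.write_text("# Add API keys here, e.g. OPENAI_API_KEY=...\n")
    if os.name != "nt":  # os.chmod has little effect on Windows
        os.chmod(env_path, 0o600)  # owner read/write only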

tests/conftest.py

Lines changed: 17 additions & 25 deletions

@@ -1,25 +1,17 @@
-import asyncio
-from pathlib import Path
-from unittest.mock import MagicMock
-try:
-    from unittest.mock import AsyncMock
-except ImportError:
-    # Python < 3.8 compatibility
-    from mock import AsyncMock
-
+from unittest.mock import MagicMock, AsyncMock
 import pytest
 
 
 @pytest.fixture
 def mock_litellm(mocker):
     """Mock litellm.acompletion to avoid actual API calls"""
-    mock = mocker.patch('litellm.acompletion', new_callable=AsyncMock)
-
+    mock = mocker.patch("litellm.acompletion", new_callable=AsyncMock)
+
     # Default mock response structure
     mock_response = MagicMock()
     mock_response.usage.completion_tokens = 100
     mock_response.choices = [MagicMock(message=MagicMock(content="Mock response"))]
-
+
     mock.return_value = mock_response
     return mock
 
@@ -28,8 +20,8 @@ def mock_litellm(mocker):
 def mock_console(mocker):
     """Mock Rich console to capture output"""
     console_mock = MagicMock()
-    mocker.patch('tacho.display.console', console_mock)
-    mocker.patch('tacho.cli.console', console_mock)
+    mocker.patch("tacho.display.console", console_mock)
+    mocker.patch("tacho.cli.console", console_mock)
     # Note: tacho.ai doesn't use console directly anymore
     return console_mock
 
@@ -39,17 +31,17 @@ def temp_tacho_dir(tmp_path, mocker):
     """Create temporary .tacho directory for testing"""
     tacho_dir = tmp_path / ".tacho"
     tacho_dir.mkdir()
-
+
     # Mock the get_env_path function to use our temp directory
-    mocker.patch('tacho.config.get_env_path', return_value=tacho_dir / ".env")
-
+    mocker.patch("tacho.config.get_env_path", return_value=tacho_dir / ".env")
+
     return tacho_dir
 
 
 @pytest.fixture
 def mock_load_dotenv(mocker):
     """Mock dotenv loading"""
-    return mocker.patch('tacho.config.load_dotenv')
+    return mocker.patch("tacho.config.load_dotenv")
 
 
 @pytest.fixture
@@ -60,8 +52,8 @@ def mock_progress(mocker):
     progress_mock.__exit__ = MagicMock(return_value=None)
     progress_mock.add_task = MagicMock(return_value="task_id")
     progress_mock.console = MagicMock()
-
-    mocker.patch('tacho.display.Progress', return_value=progress_mock)
+
+    mocker.patch("tacho.display.Progress", return_value=progress_mock)
     return progress_mock
 
 
@@ -70,11 +62,11 @@ def sample_benchmark_results():
     """Sample benchmark results for testing display functions"""
     return [
         (2.5, 100),  # Model 1, Run 1: 2.5s, 100 tokens
-        (2.3, 98),   # Model 1, Run 2: 2.3s, 98 tokens
+        (2.3, 98),  # Model 1, Run 2: 2.3s, 98 tokens
         (2.7, 102),  # Model 1, Run 3: 2.7s, 102 tokens
-        (1.8, 95),   # Model 2, Run 1: 1.8s, 95 tokens
-        (1.9, 97),   # Model 2, Run 2: 1.9s, 97 tokens
-        (1.7, 93),   # Model 2, Run 3: 1.7s, 93 tokens
+        (1.8, 95),  # Model 2, Run 1: 1.8s, 95 tokens
+        (1.9, 97),  # Model 2, Run 2: 1.9s, 97 tokens
+        (1.7, 93),  # Model 2, Run 3: 1.7s, 93 tokens
     ]
 
 
@@ -94,4 +86,4 @@ def mock_env_vars(monkeypatch):
 @pytest.fixture(autouse=True)
 def mock_cli_load_env(mocker):
     """Mock the load_env call in cli module to prevent file operations during import"""
-    mocker.patch('tacho.cli.load_env')
+    mocker.patch("tacho.cli.load_env")
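
The conftest change drops the Python < 3.8 fallback import (AsyncMock has been in unittest.mock since 3.8) along with unused imports, and switches to double quotes. As a usage note, a fixture such as mock_litellm is pulled in simply by naming it as a test argument; the test below is a hypothetical illustration, not one from the suite, and assumes pytest-asyncio and pytest-mock are installed plus the from tacho.ai import llm import the suite already relies on:

import pytest

from tacho.ai import llm


@pytest.mark.asyncio
async def test_llm_returns_mocked_response(mock_litellm):
    # mock_litellm patches litellm.acompletion with an AsyncMock, so no
    # network call happens and the canned MagicMock response is returned.
    res = await llm("gpt-4", "hello")
    assert res is mock_litellm.return_value
    mock_litellm.assert_awaited_once()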

tests/test_ai.py

Lines changed: 47 additions & 71 deletions

@@ -11,73 +11,67 @@ class TestAI:
     async def test_llm_basic_call(self, mock_litellm):
         """Test basic LLM call functionality"""
         result = await llm("gpt-4", "Test prompt", 100)
-
+
         # Verify litellm was called correctly
         mock_litellm.assert_called_once_with(
-            "gpt-4",
-            [{"role": "user", "content": "Test prompt"}],
-            max_tokens=100
+            "gpt-4", [{"role": "user", "content": "Test prompt"}], max_tokens=100
         )
-
+
         # Verify response is returned
         assert result == mock_litellm.return_value
-
+
     @pytest.mark.asyncio
     async def test_llm_without_max_tokens(self, mock_litellm):
         """Test LLM call without specifying max tokens"""
         await llm("gpt-4", "Test prompt")
-
+
         mock_litellm.assert_called_once_with(
-            "gpt-4",
-            [{"role": "user", "content": "Test prompt"}],
-            max_tokens=None
+            "gpt-4", [{"role": "user", "content": "Test prompt"}], max_tokens=None
        )
-
+
     @pytest.mark.asyncio
     async def test_ping_model_success(self, mock_litellm):
         """Test successful model ping"""
         # Create a mock console that can be used in the context
         mock_console_instance = MagicMock()
-
+
         result = await ping_model("gpt-4", mock_console_instance)
-
+
         # Verify success
         assert result is True
-
+
         # Verify console output
         mock_console_instance.print.assert_called_once_with("[green]✓[/green] gpt-4")
-
+
         # Verify LLM was called with validation prompt
         mock_litellm.assert_called_once_with(
-            "gpt-4",
-            [{"role": "user", "content": VALIDATION_PROMPT}],
-            max_tokens=1
+            "gpt-4", [{"role": "user", "content": VALIDATION_PROMPT}], max_tokens=1
         )
-
+
     @pytest.mark.asyncio
     async def test_ping_model_failure(self, mock_litellm):
         """Test failed model ping"""
         # Configure mock to raise exception
         mock_litellm.side_effect = Exception("API Error")
         mock_console_instance = MagicMock()
-
+
         result = await ping_model("invalid-model", mock_console_instance)
-
+
         # Verify failure
         assert result is False
-
+
         # Verify error output
         mock_console_instance.print.assert_called_once_with(
             "[red]✗[/red] invalid-model - API Error"
         )
-
+
     @pytest.mark.asyncio
     async def test_bench_model_success(self, mock_litellm, mocker):
         """Test successful benchmark run"""
         # Mock time to control duration measurement
-        mock_time = mocker.patch('tacho.ai.time.time')
+        mock_time = mocker.patch("tacho.ai.time.time")
         mock_time.side_effect = [100.0, 102.5]  # 2.5 second duration
-
+
         # Configure mock response with usage data (no reasoning tokens)
         mock_response = MagicMock()
         mock_usage = MagicMock()
@@ -86,93 +80,75 @@ async def test_bench_model_success(self, mock_litellm, mocker):
         mock_usage.completion_tokens_details = None
         mock_response.usage = mock_usage
         mock_litellm.return_value = mock_response
-
+
         duration, tokens = await bench_model("gpt-4", 500)
-
+
         # Verify results
         assert duration == 2.5
         assert tokens == 150
-
+
         # Verify LLM was called correctly
         mock_litellm.assert_called_once_with(
-            "gpt-4",
-            [{"role": "user", "content": BENCHMARK_PROMPT}],
-            max_tokens=500
+            "gpt-4", [{"role": "user", "content": BENCHMARK_PROMPT}], max_tokens=500
         )
-
-    @pytest.mark.asyncio
-    async def test_bench_model_no_usage_data(self, mock_litellm, mocker):
-        """Test benchmark when response has no usage data"""
-        # Mock time
-        mock_time = mocker.patch('tacho.ai.time.time')
-        mock_time.side_effect = [100.0, 101.0]
-
-        # Configure mock response without usage
-        mock_response = MagicMock()
-        mock_response.usage = None
-        mock_litellm.return_value = mock_response
-
-        duration, tokens = await bench_model("gpt-4", 500)
-
-        # Should return 0 tokens when no usage data
-        assert duration == 1.0
-        assert tokens == 0
-
+
     @pytest.mark.asyncio
     async def test_bench_model_exception_handling(self, mock_litellm):
         """Test that exceptions propagate from bench_model"""
         mock_litellm.side_effect = Exception("Network error")
-
+
         with pytest.raises(Exception, match="Network error"):
             await bench_model("gpt-4", 500)
-
+
     @pytest.mark.asyncio
     async def test_bench_model_with_reasoning_tokens(self, mock_litellm, mocker):
         """Test benchmark with reasoning models that have completion_tokens_details"""
         # Mock time
-        mock_time = mocker.patch('tacho.ai.time.time')
+        mock_time = mocker.patch("tacho.ai.time.time")
         mock_time.side_effect = [100.0, 103.0]  # 3 second duration
-
+
         # Configure mock response with reasoning tokens
         mock_response = MagicMock()
         mock_response.usage.completion_tokens = 50  # Regular completion tokens
-
+
         # Mock completion_tokens_details with reasoning_tokens
         mock_details = MagicMock()
         mock_details.reasoning_tokens = 200  # Reasoning tokens
         mock_response.usage.completion_tokens_details = mock_details
-
+
         mock_litellm.return_value = mock_response
-
+
         duration, tokens = await bench_model("o1-mini", 500)
-
+
         # Verify results - should include both completion and reasoning tokens
         assert duration == 3.0
         assert tokens == 250  # 50 completion + 200 reasoning
-
+
         # Verify LLM was called correctly
         mock_litellm.assert_called_once_with(
-            "o1-mini",
-            [{"role": "user", "content": BENCHMARK_PROMPT}],
-            max_tokens=500
+            "o1-mini", [{"role": "user", "content": BENCHMARK_PROMPT}], max_tokens=500
         )
-
+
     @pytest.mark.asyncio
-    async def test_bench_model_with_empty_completion_details(self, mock_litellm, mocker):
+    async def test_bench_model_with_empty_completion_details(
+        self, mock_litellm, mocker
+    ):
         """Test benchmark when completion_tokens_details exists but has no reasoning_tokens"""
         # Mock time
-        mock_time = mocker.patch('tacho.ai.time.time')
+        mock_time = mocker.patch("tacho.ai.time.time")
         mock_time.side_effect = [100.0, 102.0]
-
+
         # Configure mock response with completion_tokens_details but no reasoning_tokens
         mock_response = MagicMock()
         mock_response.usage.completion_tokens = 100
-        mock_response.usage.completion_tokens_details = MagicMock(spec=[])  # No reasoning_tokens attribute
-
+        mock_response.usage.completion_tokens_details = MagicMock(
+            spec=[]
+        )  # No reasoning_tokens attribute
+
         mock_litellm.return_value = mock_response
-
+
         duration, tokens = await bench_model("gpt-4", 500)
-
+
         # Should only count regular completion tokens
         assert duration == 2.0
-        assert tokens == 100
+        assert tokens == 100
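
One detail worth noting in the last test: MagicMock(spec=[]) is what forces the hasattr(..., "reasoning_tokens") guard in bench_model down the no-reasoning-tokens path, because a spec'd mock rejects attributes outside its spec while a bare MagicMock fabricates them on demand. A quick illustration with plain unittest.mock, independent of tacho:

from unittest.mock import MagicMock

loose = MagicMock()  # fabricates any attribute on access
strict = MagicMock(spec=[])  # empty spec: no attributes allowed

assert hasattr(loose, "reasoning_tokens") is True
assert hasattr(strict, "reasoning_tokens") is False  # AttributeError makes hasattr return False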
