Commit 80350c1

ping feature
1 parent 74e417a commit 80350c1

File tree: 4 files changed (+68 −1 lines changed)


CHANGELOG.md

Lines changed: 9 additions & 0 deletions
@@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.4.0] - 2025-01-27
+
+### Added
+- New `ping` command to check model availability without running benchmarks
+- Tests for the ping command functionality
+
+### Fixed
+- CLI callback logic to properly handle subcommands
+
 ## [0.3.0] - 2025-01-26
 
 ### Changed
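
For context, the new `ping` command can be exercised through Typer's test runner, the same way the added integration tests drive it. This is an illustrative sketch only; the `tacho.cli` import path and the model names are assumptions, not part of this commit.

```python
# Illustrative sketch: invoking the new ping command via Typer's CliRunner.
# The tacho.cli import path and the model names are assumptions.
from typer.testing import CliRunner

from tacho.cli import app

runner = CliRunner()
result = runner.invoke(app, ["ping", "gpt-4o", "gemini-2.5-flash"])
print(result.stdout)     # per-model check marks plus an accessibility summary
print(result.exit_code)  # 0 if at least one model responded, 1 if none did
```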

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "tacho"
-version = "0.3.0"
+version = "0.4.0"
 description = "CLI tool for measuring and comparing LLM inference speeds"
 readme = "README.md"
 authors = [

tacho/cli.py

Lines changed: 32 additions & 0 deletions
@@ -21,6 +21,11 @@ def cli_main(
     lim: int = typer.Option(500, "--lim", "-l"),
 ):
     """Default command when models are provided directly"""
+    # Check if the first argument is a known command
+    if models and models[0] in ["bench", "ping"]:
+        # This is a subcommand, let it handle the arguments
+        return
+
     if ctx.invoked_subcommand is None and models:
         bench(models, runs, prompt, lim)
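
The guard above is the "CLI callback logic" fix from the changelog: it stops the default callback from treating a subcommand name as a model identifier. A rough sketch of the assumed surrounding setup, since the decorator and argument declarations are not part of this diff:

```python
# Assumed context for the guard (not shown in this commit's diff): cli_main is
# registered as the app callback so bare model names work without a subcommand,
# which means the positional `models` list can also capture "bench" or "ping".
import typer

app = typer.Typer()

@app.callback(invoke_without_command=True)
def cli_main(
    ctx: typer.Context,
    models: list[str] = typer.Argument(None),
    lim: int = typer.Option(500, "--lim", "-l"),
):
    """Default command when models are provided directly"""
    if models and models[0] in ["bench", "ping"]:
        return  # a subcommand was named; let it handle the arguments
    if ctx.invoked_subcommand is None and models:
        ...  # dispatch to bench(...)
```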

@@ -81,6 +86,33 @@ def bench(
     console.print(table)
 
 
+@app.command()
+def ping(
+    models: list[str] = typer.Argument(
+        ...,
+        help="List of models to check availability (e.g., gpt-4o gemini-2.5-flash)",
+    ),
+):
+    """Check which LLM models are accessible without running benchmarks"""
+    res = asyncio.run(ping_models(models))
+
+    # Count successful models
+    successful = sum(res)
+
+    # Print summary
+    console.print()
+    if successful == len(models):
+        console.print(f"[bold green]All {len(models)} models are accessible![/bold green]")
+    elif successful > 0:
+        console.print(f"[bold yellow]{successful}/{len(models)} models are accessible[/bold yellow]")
+    else:
+        console.print("[bold red]No models are accessible[/bold red]")
+
+    # Exit with appropriate code
+    if successful == 0:
+        raise typer.Exit(1)
+
+
 def main():
     """Main entry point that suppresses warnings on exit."""
     os.environ["PYTHONWARNINGS"] = "ignore"
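
The command delegates the actual checks to `ping_models`, which is referenced here but defined elsewhere in the package and untouched by this commit. A minimal sketch of what such a helper could look like, assuming concurrent per-model checks and a ✓/✗ line per model; the `litellm.acompletion` call and message details are assumptions inferred from the tests, not confirmed by this diff:

```python
# Minimal sketch of a ping_models helper consistent with how cli.py and the
# tests use it; the litellm.acompletion call and prompt content are assumptions.
import asyncio

import litellm
from rich.console import Console

console = Console()


async def _ping_one(model: str) -> bool:
    """Send a tiny request to one model and print ✓ or ✗ for it."""
    try:
        await litellm.acompletion(
            model=model,
            messages=[{"role": "user", "content": "ping"}],
            max_tokens=1,
        )
        console.print(f"[green]✓[/green] {model}")
        return True
    except Exception:
        console.print(f"[red]✗[/red] {model}")
        return False


async def ping_models(models: list[str]) -> list[bool]:
    """Check all models concurrently; the result is summable, as ping() expects."""
    return await asyncio.gather(*(_ping_one(m) for m in models))
```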

tests/test_integration.py

Lines changed: 26 additions & 0 deletions
@@ -63,3 +63,29 @@ def test_error_handling_no_api_key(self):
         if original_key:
             os.environ["OPENAI_API_KEY"] = original_key
 
+    def test_ping_single_model(self):
+        if not os.getenv("OPENAI_API_KEY"):
+            pytest.skip("OPENAI_API_KEY not set")
+
+        result = runner.invoke(app, ["ping", "gpt-4.1-mini"])
+        assert result.exit_code == 0
+        assert "✓ gpt-4.1-mini" in result.stdout
+        assert "models are accessible" in result.stdout
+
+    def test_ping_multiple_models(self):
+        if not os.getenv("OPENAI_API_KEY"):
+            pytest.skip("OPENAI_API_KEY not set")
+
+        result = runner.invoke(app, ["ping", "gpt-4.1-mini", "invalid-model-xyz"])
+        assert result.exit_code == 0
+        assert "✓ gpt-4.1-mini" in result.stdout
+        assert "✗ invalid-model-xyz" in result.stdout
+        assert "1/2 models are accessible" in result.stdout
+
+    def test_ping_all_invalid_models(self):
+        result = runner.invoke(app, ["ping", "invalid-model-1", "invalid-model-2"])
+        assert result.exit_code == 1
+        assert "✗ invalid-model-1" in result.stdout
+        assert "✗ invalid-model-2" in result.stdout
+        assert "No models are accessible" in result.stdout
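
The new tests use `runner`, `app`, `os`, and `pytest` that are already defined earlier in `tests/test_integration.py` and do not appear in this hunk. A sketch of the assumed module header, for readers following along outside the repository:

```python
# Assumed header of tests/test_integration.py (not part of this diff): the new
# tests rely on these names already being available at module scope.
import os

import pytest
from typer.testing import CliRunner

from tacho.cli import app

runner = CliRunner()
```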
