Skip to content

Commit 6c7fd70

Browse files
committed
changes
1 parent e2fb75c commit 6c7fd70

10 files changed

Lines changed: 21 additions & 20 deletions

File tree

README.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
<p align="center">
2-
<img width="75%" alt="mvlm logo" src="assets/logo2.svg" /><br>
3-
<b>Quickly find the <u>m</u>inimum <u>v</u>iable <u>l</u>anguage <u>m</u>odel (mvlm) for your task, for faster and cheaper intelligence</b>
2+
<img width="75%" alt="smollest logo" src="assets/logo2.svg" /><br>
3+
<b>Quickly find the <u>s</u>mollest <u>m</u>inimum <u>v</u>iable <u>l</u>anguage <u>m</u>odel for your task, for faster and cheaper intelligence</b>
44
</p>
55

66
The basic idea is to run your OpenAI/Anthropic API queries to other, smaller models on Hugging Face API (or local), allowing you to quickly find the smallest/cheapest/fastest model that would work for your use case.
77

88
<p align="center">
9-
<img alt="mvlm dashboard screenshot" src="assets/screenshot.png" />
9+
<img alt="smollest dashboard screenshot" src="assets/screenshot.png" />
1010
</p>
1111

1212

@@ -78,7 +78,7 @@ Remote candidates run in parallel; local candidates run sequentially.
7878
## Dashboard
7979

8080
```bash
81-
mvlm show
81+
smollest show
8282
```
8383

8484
Opens a web dashboard with projects in the sidebar, a results table with truncation for long outputs, latency and cost per model, and aggregate match rates. The image above shows the UI, which you can reproduce by cloning this repo and running: `python examples/demo_dashboard.py`

assets/logo.svg

Lines changed: 3 additions & 3 deletions
Loading

assets/logo2.svg

Lines changed: 3 additions & 3 deletions
Loading

mvlm/anthropic.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ def __init__(
117117
import anthropic as _anthropic
118118
except ImportError:
119119
raise ImportError(
120-
"anthropic package is required. Install it with: pip install mvlm[anthropic]"
120+
"anthropic package is required. Install it with: pip install smollest[anthropic]"
121121
)
122122

123123
self._client = _anthropic.Anthropic(**kwargs)

mvlm/cli.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ def main():
2121
project = args[1] if len(args) > 1 else None
2222
report(project=project)
2323
else:
24-
print("Usage: mvlm [show|report] [project]")
24+
print("Usage: smollest [show|report] [project]")
2525
sys.exit(1)
2626

2727

mvlm/openai.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def __init__(
9797
import openai as _openai
9898
except ImportError:
9999
raise ImportError(
100-
"openai package is required. Install it with: pip install mvlm[openai]"
100+
"openai package is required. Install it with: pip install smollest[openai]"
101101
)
102102

103103
self._client = _openai.OpenAI(**kwargs)

mvlm/package.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
2-
"name": "mvlm",
2+
"name": "smollest",
33
"version": "0.1.1",
4-
"description": "Minimum Viable Language Model",
4+
"description": "Smollest",
55
"python": "true"
66
}

mvlm/results.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ def print_comparison(
7171
candidate_latencies: dict[str, float],
7272
) -> None:
7373
print(f"\n{'=' * 60}")
74-
print(f"mvlm comparison — baseline: {baseline_model} ({baseline_latency_ms:.0f}ms)")
74+
print(f"smollest comparison — baseline: {baseline_model} ({baseline_latency_ms:.0f}ms)")
7575
print(f"{'=' * 60}")
7676

7777
for comp in comparisons:
@@ -136,7 +136,7 @@ def report(project: str | None = None) -> None:
136136
candidates.setdefault(name, []).append(score)
137137

138138
print(f"\n{'=' * 60}")
139-
print(f"mvlm summary — project: {proj} ({len(entries)} comparisons)")
139+
print(f"smollest summary — project: {proj} ({len(entries)} comparisons)")
140140
print(f"{'=' * 60}")
141141

142142
for name, scores in sorted(candidates.items()):

mvlm/web.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
<head>
1313
<meta charset="UTF-8">
1414
<meta name="viewport" content="width=device-width, initial-scale=1.0">
15-
<title>mvlm — Minimum Viable Language Model</title>
15+
<title>smollest — Minimum Viable Language Model</title>
1616
<style>
1717
* { margin: 0; padding: 0; box-sizing: border-box; }
1818
svg.filters { position: absolute; width: 0; height: 0; }
@@ -157,7 +157,7 @@
157157
<body>
158158
159159
<div class="sidebar">
160-
<h1>mvlm</h1>
160+
<h1>smollest</h1>
161161
<h2>Projects</h2>
162162
<div id="project-list"></div>
163163
</div>
@@ -461,7 +461,7 @@ def show(port: int = 8765) -> None:
461461
server = HTTPServer(("127.0.0.1", port), _make_handler(page))
462462

463463
url = f"http://127.0.0.1:{port}"
464-
print(f"mvlm dashboard: {url}")
464+
print(f"smollest dashboard: {url}")
465465
print("Press Ctrl+C to stop")
466466

467467
threading.Timer(0.5, lambda: webbrowser.open(url)).start()

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ classifiers = [
2121
dynamic = ["version"]
2222

2323
[project.scripts]
24+
smollest = "mvlm.cli:main"
2425
mvlm = "mvlm.cli:main"
2526

2627
[project.urls]

0 commit comments

Comments
 (0)