Skip to content

Commit 1c58566

Browse files
authored
Added option to run sgpt with LocalAI (#307)
1 parent 4aed53b commit 1c58566

File tree

5 files changed

+15
-39
lines changed

5 files changed

+15
-39
lines changed

README.md

+5-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# ShellGPT
2-
A command-line productivity tool powered by OpenAI's GPT models. As developers, we can leverage AI capabilities to generate shell commands, code snippets, comments, and documentation, among other things. Forget about cheat sheets and notes, with this tool you can get accurate answers right in your terminal, and you'll probably find yourself reducing your daily Google searches, saving you valuable time and effort. ShellGPT is cross-platform compatible and supports all major operating systems, including Linux, macOS, and Windows with all major shells, such as PowerShell, CMD, Bash, Zsh, Fish, and many others.
2+
A command-line productivity tool powered by AI large language models (LLM). As developers, we can leverage AI capabilities to generate shell commands, code snippets, comments, and documentation, among other things. Forget about cheat sheets and notes, with this tool you can get accurate answers right in your terminal, and you'll probably find yourself reducing your daily Google searches, saving you valuable time and effort. ShellGPT is cross-platform compatible and supports all major operating systems, including Linux, macOS, and Windows with all major shells, such as PowerShell, CMD, Bash, Zsh, Fish, and many others.
33

44
https://user-images.githubusercontent.com/16740832/231569156-a3a9f9d4-18b1-4fff-a6e1-6807651aa894.mp4
55

@@ -358,7 +358,7 @@ Switch `SYSTEM_ROLES` to force use [system roles](https://help.openai.com/en/art
358358
│ prompt [PROMPT] The prompt to generate completions for. │
359359
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
360360
╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────╮
361-
│ --model [gpt-4|gpt-4-32k|gpt-3.5|gpt-3.5-16k] OpenAI GPT model to use. [default: gpt-3.5-turbo] │
361+
│ --model TEXT OpenAI GPT model to use. [default: gpt-3.5-turbo] │
362362
│ --temperature FLOAT RANGE [0.0<=x<=2.0] Randomness of generated output. [default: 0.1] │
363363
│ --top-probability FLOAT RANGE [0.1<=x<=1.0] Limits highest probable tokens (words). [default: 1.0] │
364364
│ --editor Open $EDITOR to provide a prompt. [default: no-editor] │
@@ -384,6 +384,9 @@ Switch `SYSTEM_ROLES` to force use [system roles](https://help.openai.com/en/art
384384
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
385385
```
386386
387+
## LocalAI
388+
By default, ShellGPT leverages OpenAI's large language models. However, it also provides the flexibility to use locally hosted models, which can be a cost-effective alternative. To use local models, you will need to run your own API server. You can accomplish this by using [LocalAI](https://github.com/go-skynet/LocalAI), a self-hosted, OpenAI-compatible API. Setting up LocalAI allows you to run language models on your own hardware, potentially without the need for an internet connection, depending on your usage. To set up LocalAI, please follow this comprehensive [guide](https://github.com/TheR1D/shell_gpt/wiki/LocalAI). Remember that the performance of your local models may depend on the specifications of your hardware and the specific language model you choose to deploy.
389+
387390
## Docker
388391
Run the container using the `OPENAI_API_KEY` environment variable, and a docker volume to store cache:
389392
```shell

sgpt/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
from .app import main as main
22
from .app import entry_point as cli # noqa: F401
33

4-
__version__ = "0.9.3"
4+
__version__ = "0.9.4"

sgpt/app.py

+8-20
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,3 @@
1-
"""
2-
This module provides a simple interface for OpenAI API using Typer
3-
as the command line interface. It supports different modes of output including
4-
shell commands and code, and allows users to specify the desired OpenAI model
5-
and length and other options of the output. Additionally, it supports executing
6-
shell commands directly from the interface.
7-
"""
81
# To allow users to use arrow keys in the REPL.
92
import readline # noqa: F401
103
import sys
@@ -18,12 +11,7 @@
1811
from sgpt.handlers.default_handler import DefaultHandler
1912
from sgpt.handlers.repl_handler import ReplHandler
2013
from sgpt.role import DefaultRoles, SystemRole
21-
from sgpt.utils import (
22-
ModelOptions,
23-
get_edited_prompt,
24-
install_shell_integration,
25-
run_command,
26-
)
14+
from sgpt.utils import get_edited_prompt, install_shell_integration, run_command
2715

2816

2917
def main(
@@ -32,9 +20,9 @@ def main(
3220
show_default=False,
3321
help="The prompt to generate completions for.",
3422
),
35-
model: ModelOptions = typer.Option(
36-
ModelOptions(cfg.get("DEFAULT_MODEL")).value,
37-
help="OpenAI GPT model to use.",
23+
model: str = typer.Option(
24+
cfg.get("DEFAULT_MODEL"),
25+
help="Large language model to use.",
3826
),
3927
temperature: float = typer.Option(
4028
0.1,
@@ -159,7 +147,7 @@ def main(
159147
# Will be in infinite loop here until user exits with Ctrl+C.
160148
ReplHandler(repl, role_class).handle(
161149
prompt,
162-
model=model.value,
150+
model=model,
163151
temperature=temperature,
164152
top_probability=top_probability,
165153
chat_id=repl,
@@ -169,7 +157,7 @@ def main(
169157
if chat:
170158
full_completion = ChatHandler(chat, role_class).handle(
171159
prompt,
172-
model=model.value,
160+
model=model,
173161
temperature=temperature,
174162
top_probability=top_probability,
175163
chat_id=chat,
@@ -178,7 +166,7 @@ def main(
178166
else:
179167
full_completion = DefaultHandler(role_class).handle(
180168
prompt,
181-
model=model.value,
169+
model=model,
182170
temperature=temperature,
183171
top_probability=top_probability,
184172
caching=cache,
@@ -198,7 +186,7 @@ def main(
198186
elif option == "d":
199187
DefaultHandler(DefaultRoles.DESCRIBE_SHELL.get_role()).handle(
200188
full_completion,
201-
model=model.value,
189+
model=model,
202190
temperature=temperature,
203191
top_probability=top_probability,
204192
caching=cache,

sgpt/config.py

+1-3
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,6 @@
66

77
from click import UsageError
88

9-
from .utils import ModelOptions
10-
119
CONFIG_FOLDER = os.path.expanduser("~/.config")
1210
SHELL_GPT_CONFIG_FOLDER = Path(CONFIG_FOLDER) / "shell_gpt"
1311
SHELL_GPT_CONFIG_PATH = SHELL_GPT_CONFIG_FOLDER / ".sgptrc"
@@ -23,7 +21,7 @@
2321
"CHAT_CACHE_LENGTH": int(os.getenv("CHAT_CACHE_LENGTH", "100")),
2422
"CACHE_LENGTH": int(os.getenv("CHAT_CACHE_LENGTH", "100")),
2523
"REQUEST_TIMEOUT": int(os.getenv("REQUEST_TIMEOUT", "60")),
26-
"DEFAULT_MODEL": os.getenv("DEFAULT_MODEL", ModelOptions.GPT35TURBO.value),
24+
"DEFAULT_MODEL": os.getenv("DEFAULT_MODEL", "gpt-3.5-turbo"),
2725
"OPENAI_API_HOST": os.getenv("OPENAI_API_HOST", "https://api.openai.com"),
2826
"DEFAULT_COLOR": os.getenv("DEFAULT_COLOR", "magenta"),
2927
"ROLE_STORAGE_PATH": os.getenv("ROLE_STORAGE_PATH", str(ROLE_STORAGE_PATH)),

sgpt/utils.py

-13
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,13 @@
11
import os
22
import platform
33
import shlex
4-
from enum import Enum
54
from tempfile import NamedTemporaryFile
65
from typing import Any, Callable
76

87
import typer
98
from click import BadParameter
109

1110

12-
class ModelOptions(str, Enum):
13-
"""
14-
Model endpoint compatibility
15-
https://platform.openai.com/docs/models/model-endpoint-compatibility
16-
"""
17-
18-
GPT4 = "gpt-4"
19-
GPT432k = "gpt-4-32k"
20-
GPT35TURBO = "gpt-3.5-turbo"
21-
GPT35TURBO16K = "gpt-3.5-turbo-16k"
22-
23-
2411
def get_edited_prompt() -> str:
2512
"""
2613
Opens the user's default editor to let them

0 commit comments

Comments
 (0)