
Commit a1c09be

moved generate_response to llm.py

1 parent 2197332

File tree: 3 files changed (+25, -20 lines)

cli/ai_model_manager.py: 2 additions & 1 deletion

```diff
@@ -2,7 +2,6 @@
 import os
 import threading
 from cli.utils import spin_loader
-from cli.llm import gemini_api_output


 class AIModelManager:
@@ -172,6 +171,8 @@ def generate_output(self, model_name, prompt_by_user):
         spinner_thread = threading.Thread(target=spin_loader, args=(stop_spinner,))
         spinner_thread.start()

+        from cli.llm import gemini_api_output
+
         output = gemini_api_output(model_name, prompt_by_user)

         stop_spinner.set()
```
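The notable part of this hunk is why the import moved inside the method: with this commit, cli/llm.py imports AIModelManager at module level (see the next file), so keeping `from cli.llm import gemini_api_output` at the top of ai_model_manager.py would presumably create a circular import. Deferring the import to call time sidesteps the cycle. A stripped-down sketch of the pattern, with the spinner handling omitted:

```python
# cli/ai_model_manager.py (sketch, not the full file).
# A module-level `from cli.llm import gemini_api_output` would now break,
# because cli/llm.py imports AIModelManager from this module: Python would
# hit a partially initialized module during interpreter startup.

class AIModelManager:
    def generate_output(self, model_name, prompt_by_user):
        # Deferred import: by the time this method is called, cli.llm has
        # finished importing, so the import cycle is never exercised.
        from cli.llm import gemini_api_output
        return gemini_api_output(model_name, prompt_by_user)
```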

cli/llm.py: 22 additions & 0 deletions

```diff
@@ -1,4 +1,9 @@
+import os
+
 from google import genai
+from cli.ai_model_manager import AIModelManager
+from cli.prettify_llm_output import prettify_llm_output
+from cli.chat_history import ChatHistory


 def gemini_api_output(model_name, prompt_by_user):
@@ -21,3 +26,20 @@ def gemini_api_output(model_name, prompt_by_user):
     response = client.models.generate_content(model=model_name, contents=prompt_by_user)

     return response.text
+
+
+def generate_response(prompt: str, manager: AIModelManager, history: ChatHistory):
+    system_instruction = (
+        "System prompt: Give response in short and MD format, "
+        "if asked for commands then give commands and don't explain too much"
+    )
+    full_prompt = f"{prompt}\n{system_instruction}"
+    history.append("user", full_prompt)
+    flat_prompt = history.get_prompt()
+    models = manager.load()
+    selected = models.get("selected_model")
+    if not selected:
+        selected = "gemini-1.5-flash"
+    response = manager.generate_output(selected, flat_prompt)
+    history.append("assistant", response or "")
+    prettify_llm_output(response)
```
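Note that `generate_response` touches ChatHistory only through `append(role, text)` and `get_prompt()`, so any object with those two methods would satisfy it. A minimal stand-in for that interface, purely illustrative (the real class in cli/chat_history.py presumably also persists turns to HISTORY_FILE, which this omits):

```python
# Illustrative stand-in for cli.chat_history.ChatHistory; only the two
# methods generate_response actually calls are modeled here.
class MinimalChatHistory:
    def __init__(self):
        self.turns: list[tuple[str, str]] = []

    def append(self, role: str, text: str) -> None:
        # Record one conversation turn ("user" or "assistant").
        self.turns.append((role, text))

    def get_prompt(self) -> str:
        # Flatten the whole conversation into one prompt string
        # for the model, one "role: text" line per turn.
        return "\n".join(f"{role}: {text}" for role, text in self.turns)
```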

cli/prompt.py: 1 addition & 19 deletions

```diff
@@ -1,32 +1,14 @@
 import os
-import json
 import sys
 import argparse
 from cli.ai_model_manager import AIModelManager
-from cli.prettify_llm_output import prettify_llm_output
 from cli.chat_history import ChatHistory, get_bash_history
+from cli.llm import generate_response

 HISTORY_FILE = os.path.expanduser("~/.cache/cli_chat_history.json")
 DEFAULT_BASH_HISTORY_COUNT = 3


-def generate_response(prompt: str, manager: AIModelManager, history: ChatHistory):
-    system_instruction = (
-        "System prompt: Give response in short and MD format, "
-        "if asked for commands then give commands and don't explain too much"
-    )
-    full_prompt = f"{prompt}\n{system_instruction}"
-    history.append("user", full_prompt)
-    flat_prompt = history.get_prompt()
-    models = manager.load()
-    selected = models.get("selected_model")
-    if not selected:
-        selected = "gemini-1.5-flash"
-    response = manager.generate_output(selected, flat_prompt)
-    history.append("assistant", response or "")
-    prettify_llm_output(response)
-
-
 def main():
     raw = sys.argv[1:]
     known_cmds = ["chat", "debug", "config", "list", "remove", "select"]
```
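With the function body gone, prompt.py's chat path only needs to construct its collaborators and delegate to the imported helper. A hypothetical call site; only generate_response's signature comes from this commit, the no-argument constructors and the run_chat wrapper are assumptions about the surrounding code:

```python
# Hypothetical wiring for the chat path in cli/prompt.py.
def run_chat(prompt_text: str) -> None:
    manager = AIModelManager()   # assumed no-arg constructor
    history = ChatHistory()      # assumed no-arg constructor
    generate_response(prompt_text, manager, history)

# e.g. inside main():
#     if cmd == "chat":
#         run_chat(" ".join(rest))
```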
