Skip to content

Commit a92b9e8

Browse files
committed
Add basic terminal command use
Adds a tool that enables Sodalis to use basic Linux commands in the terminal and include the results in responses to the user.
1 parent b8a11d7 commit a92b9e8

5 files changed

Lines changed: 216 additions & 12 deletions

File tree

config/companions/default.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,3 +2,9 @@ name: Default
22
model: qwen3:8b
33
system_prompt: system_prompt.txt
44
temperature: 0.7
5+
6+
tools:
7+
enabled: true
8+
timeout: 10
9+
max_output: 4096
10+
max_rounds: 3

config/prompts/system_prompt.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1-
You are Sodalis, a local AI companion running on a Linux machine. Be practical, concise, and clear.
1+
You are a local AI companion running on a Linux machine. Be practical, concise, and clear.
2+
You can run read-only Linux commands when needed to answer questions. Use the tool format described below when you need to inspect the system.

src/sodalis/cli.py

Lines changed: 45 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,18 @@
22

33
from sodalis.config import load_companion_config
44
from sodalis.paths import ensure_runtime_dirs, get_prompts_dir
5-
from sodalis.llm import generate_response
5+
from sodalis.llm import generate_response, generate_with_tools
66
from sodalis.db import init_db
77
from sodalis.history import create_chat, delete_chat, list_chats, chat_exists, load_messages, save_message
88
from sodalis.prompt_builder import build_prompt, load_prompt_layers
9+
from sodalis.tools import TOOL_SCHEMA_TEXT
910

1011

11-
def run_chat(companion, system_prompt, companion_prompt, chat_id = None, resume_chat_id = None):
12+
def run_chat(companion, system_prompt, companion_prompt, chat_id = None, resume_chat_id = None, use_tools = True):
1213
print(f"Sodalis {companion['name']} ({companion['model']})")
1314

15+
tools_cfg = companion.get("tools", {})
16+
1417
history = []
1518
chat_id = None
1619

@@ -45,7 +48,17 @@ def run_chat(companion, system_prompt, companion_prompt, chat_id = None, resume_
4548

4649
prompt = build_prompt(system_prompt, companion_prompt, history, user_input)
4750

48-
response = generate_response(
51+
if use_tools and tools_cfg.get("enabled", True):
52+
response = generate_with_tools(
53+
prompt,
54+
companion["model"],
55+
companion["temperature"],
56+
max_rounds=tools_cfg.get("max_rounds", 3),
57+
timeout=tools_cfg.get("timeout", 10),
58+
max_output=tools_cfg.get("max_output", 4096),
59+
)
60+
else:
61+
response = generate_response(
4962
prompt,
5063
companion["model"],
5164
companion["temperature"]
@@ -63,15 +76,26 @@ def run_chat(companion, system_prompt, companion_prompt, chat_id = None, resume_
6376
break
6477

6578

66-
def run_once(companion, system_prompt, message):
79+
def run_once(companion, system_prompt, message, use_tools=True):
6780

81+
tools_cfg = companion.get("tools", {})
6882
full_prompt = f"{system_prompt}\n\nUSER: {message}\nASSISTANT:"
6983

70-
response = generate_response(
71-
prompt=full_prompt,
72-
model=companion["model"],
73-
temperature=companion["temperature"],
74-
)
84+
if use_tools and tools_cfg.get("enabled", True):
85+
response = generate_with_tools(
86+
prompt=full_prompt,
87+
model=companion["model"],
88+
temperature=companion["temperature"],
89+
max_rounds=tools_cfg.get("max_rounds", 3),
90+
timeout=tools_cfg.get("timeout", 10),
91+
max_output=tools_cfg.get("max_output", 4096),
92+
)
93+
else:
94+
response = generate_response(
95+
prompt=full_prompt,
96+
model=companion["model"],
97+
temperature=companion["temperature"],
98+
)
7599

76100
print(response)
77101

@@ -142,13 +166,23 @@ def main() -> None:
142166
metavar="CHAT_ID",
143167
help="Delete a saved chat by ID",
144168
)
169+
parser.add_argument(
170+
"--no-tools",
171+
action="store_true",
172+
help="Disable tool use for this invocation",
173+
)
145174

146175
args = parser.parse_args()
147176
ensure_runtime_dirs()
148177

149178
companion = load_companion_config(args.companion)
150179
companion_name = companion.get("name", args.companion)
151180
system_prompt, companion_prompt = load_prompt_layers(companion)
181+
182+
use_tools = not args.no_tools
183+
tools_cfg = companion.get("tools", {})
184+
if use_tools and tools_cfg.get("enabled", True):
185+
system_prompt = system_prompt + "\n\n" + TOOL_SCHEMA_TEXT
152186

153187
if args.history:
154188
chats = list_chats()
@@ -171,9 +205,9 @@ def main() -> None:
171205

172206
if args.chat is not None:
173207
if args.chat == "new":
174-
run_chat(companion, system_prompt, companion_prompt)
208+
run_chat(companion, system_prompt, companion_prompt, use_tools=use_tools)
175209
else:
176-
run_chat(companion, system_prompt, companion_prompt, resume_chat_id=int(args.chat))
210+
run_chat(companion, system_prompt, companion_prompt, resume_chat_id=int(args.chat), use_tools=use_tools)
177211

178212

179213
if __name__ == "__main__":

src/sodalis/llm.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
import requests
22

3+
from sodalis.tools import parse_tool_call, run_command, ToolError
4+
35

46
def generate_response(
57
prompt: str,
@@ -23,3 +25,34 @@ def generate_response(
2325
data = response.json()
2426

2527
return data.get("response", "").strip()
28+
29+
30+
def generate_with_tools(
    prompt: str,
    model: str,
    temperature: float,
    max_rounds: int = 3,
    timeout: int = 10,
    max_output: int = 4096,
) -> str:
    """Generate a response, resolving up to ``max_rounds`` tool calls.

    The model's reply is scanned for a fenced tool call; if one is found,
    the command is executed and its output is appended to a growing
    transcript so the model can continue with FULL context of every
    earlier round (the original code rebuilt the follow-up from the bare
    prompt each round, silently dropping prior tool interactions).

    Args:
        prompt: The fully built prompt (system + history + user turn).
        model: Ollama model name.
        temperature: Sampling temperature passed through to the backend.
        max_rounds: Maximum number of tool-call/response cycles.
        timeout: Per-command timeout in seconds.
        max_output: Per-command output cap in characters.

    Returns:
        The model's final text response. If the model still emits a tool
        call after ``max_rounds``, that raw response is returned as-is.
    """
    transcript = prompt
    response_text = generate_response(transcript, model, temperature)

    for _ in range(max_rounds):
        tool_call = parse_tool_call(response_text)
        if tool_call is None:
            break

        try:
            output = run_command(
                tool_call["command"],
                tool_call["args"],
                timeout=timeout,
                max_output=max_output,
            )
        except ToolError as e:
            # Feed the failure back to the model instead of crashing the
            # chat loop; it can apologize or try a different command.
            output = f"Error: {e}"

        # Accumulate: keep every assistant turn and tool output in the
        # transcript so later rounds see the whole exchange.
        transcript += f"\n\n[ASSISTANT]\n{response_text}\n\n[TOOL OUTPUT]\n{output}\n\n[ASSISTANT]"
        response_text = generate_response(transcript, model, temperature)

    return response_text

src/sodalis/tools.py

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
import json
2+
import re
3+
import shutil
4+
import subprocess
5+
from pathlib import Path
6+
7+
8+
class ToolError(Exception):
    """Raised when a tool call is rejected, not found, fails, or times out."""
    pass
10+
11+
12+
# Resolve allowed commands at import time to prevent PATH hijacking
_COMMAND_NAMES = [
    "cat", "grep", "echo", "ls", "pwd", "head", "tail", "wc",
    "find", "stat", "file", "du", "df", "uname", "whoami", "id",
    "date", "printenv", "env",
]

# Maps command name -> absolute resolved path. Commands not found on this
# machine's PATH are simply omitted, so key membership also serves as an
# availability check.
ALLOWED_COMMANDS: dict[str, Path | None] = {}
for _cmd in _COMMAND_NAMES:
    _resolved = shutil.which(_cmd)
    if _resolved:
        ALLOWED_COMMANDS[_cmd] = Path(_resolved)

# Commands that must never appear, even as arguments
_BLOCKED_TOKENS = {"sudo", "su", "doas"}

# Upper bound on a single argument's length (guards against pathological
# model output).
_MAX_ARG_LEN = 1024

# A fenced code block tagged "tool" containing one JSON object, e.g.
# ```tool\n{...}\n```  (DOTALL lets the JSON span multiple lines).
_TOOL_CALL_RE = re.compile(
    r"```tool\s*\n(\{.*?\})\s*\n```",
    re.DOTALL,
)


# Appended to the system prompt so the model knows the tool-call format.
# NOTE: built at import time, so it only advertises commands that actually
# resolved on this machine.
TOOL_SCHEMA_TEXT = """\
You have access to read-only Linux commands. To run a command, output a fenced \
JSON block tagged "tool" with the keys "command" (string) and "args" (list of strings).

Example:
```tool
{"command": "ls", "args": ["-la", "/tmp"]}
```

Available commands: """ + ", ".join(sorted(ALLOWED_COMMANDS.keys())) + """

Rules:
- Only one tool call per response.
- Do NOT wrap the tool block inside another code block.
- If you don't need a command, just reply normally with no tool block.
- The tool output will be provided back to you so you can give a final answer.
"""
53+
54+
55+
def _validate_args(args: list[str]) -> None:
56+
for arg in args:
57+
if "\x00" in arg:
58+
raise ToolError("Null bytes are not allowed in arguments")
59+
if len(arg) > _MAX_ARG_LEN:
60+
raise ToolError(f"Argument exceeds maximum length of {_MAX_ARG_LEN}")
61+
lower = arg.lower()
62+
for blocked in _BLOCKED_TOKENS:
63+
if lower == blocked:
64+
raise ToolError(f"Blocked token in arguments: {blocked}")
65+
66+
67+
def run_command(
    name: str,
    args: list[str],
    timeout: int = 10,
    max_output: int = 4096,
) -> str:
    """Execute an allow-listed read-only command and return its output.

    Args:
        name: Command name; must be a key of ``ALLOWED_COMMANDS``.
        args: Argument list; validated by ``_validate_args``.
        timeout: Seconds to wait before the command is killed.
        max_output: Maximum characters of combined stdout+stderr returned.

    Returns:
        Combined stdout and stderr, truncated to ``max_output`` characters
        (with a truncation note when cut).

    Raises:
        ToolError: if the command is not allowed, unavailable, fails
            argument validation, times out, or exits non-zero.
    """
    if name not in ALLOWED_COMMANDS:
        raise ToolError(f"Command not allowed: {name}")

    resolved = ALLOWED_COMMANDS[name]
    if resolved is None:
        raise ToolError(f"Command not found on system: {name}")

    _validate_args(args)

    # `find` has action predicates that execute arbitrary programs or
    # delete files; block them so the allow-list stays read-only even
    # against untrusted model output.
    if name == "find":
        forbidden = {
            "-exec", "-execdir", "-ok", "-okdir", "-delete",
            "-fprint", "-fprint0", "-fprintf", "-fls",
        }
        for arg in args:
            if arg.lower() in forbidden:
                raise ToolError(f"Blocked find action: {arg}")

    cmd = [str(resolved)] + args

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=timeout,
            shell=False,                # argv list, never a shell string
            stdin=subprocess.DEVNULL,   # commands like `cat`/`grep` with no
                                        # file args would otherwise block on
                                        # inherited stdin until the timeout
        )
    except subprocess.TimeoutExpired as exc:
        raise ToolError(f"Command timed out after {timeout}s") from exc

    output = result.stdout + result.stderr

    # Truncate BEFORE the exit-code check so error output is bounded and
    # carries the same truncation note as the success path.
    if len(output) > max_output:
        output = output[:max_output] + "\n... (output truncated)"

    if result.returncode != 0:
        raise ToolError(
            f"Command exited with code {result.returncode}:\n" + output
        )

    return output
107+
108+
109+
def parse_tool_call(response: str) -> dict | None:
110+
match = _TOOL_CALL_RE.search(response)
111+
if not match:
112+
return None
113+
114+
try:
115+
data = json.loads(match.group(1))
116+
except json.JSONDecodeError:
117+
return None
118+
119+
if not isinstance(data, dict):
120+
return None
121+
if "command" not in data or not isinstance(data["command"], str):
122+
return None
123+
if "args" not in data:
124+
data["args"] = []
125+
if not isinstance(data["args"], list):
126+
return None
127+
if not all(isinstance(a, str) for a in data["args"]):
128+
return None
129+
130+
return data

0 commit comments

Comments
 (0)