
Commit a497d96

Merge pull request #1 from Joshua92500/dev
Update v0.1.0
2 parents c2ab372 + a92b9e8 commit a497d96

12 files changed

Lines changed: 732 additions & 22 deletions


.gitignore

Lines changed: 3 additions & 1 deletion
@@ -1,8 +1,10 @@
-# Ignore local Python environment
+# Ignore local development environment
 __pycache__/
 *.pyc
 .venv/
 *.egg-info/
+*.fish
+*.sh


 # Ignore custom companions and prompts

README.md

Lines changed: 127 additions & 1 deletion
@@ -1 +1,127 @@
-# sodalis
+# Sodalis
+
+A local AI companion and system assistant powered by [Ollama](https://ollama.com). Run a fully offline, customizable AI from your terminal.
+
+---
+
+## Features
+
+- **CLI-first** — send messages directly from your terminal
+- **Companion profiles** — swap between different AI personas via YAML config
+- **Custom system prompts** — define your companion's behavior with plain text files
+- **Ollama backend** — runs models locally via the Ollama API
+
+---
+
+## Requirements
+
+- Python 3.11+
+- [Ollama](https://ollama.com) running locally (or via Docker)
+- An Ollama-compatible model pulled
+
+---
+
+## Installation
+
+```bash
+git clone https://github.com/Joshua92500/sodalis.git
+cd sodalis
+pip install -e .
+```
+
+This installs the `sodalis` CLI command.
+
+---
+
+## Running Ollama
+
+You can run Ollama locally, or use the included Docker Compose file (requires NVIDIA GPU):
+
+```bash
+docker compose up -d
+```
+
+Then pull a model:
+
+```bash
+docker exec -it sodalis-ollama ollama pull qwen3:8b
+```
+
+---
+
+## Configuration
+
+Sodalis looks for companion profiles and system prompts in `~/.config/sodalis/`.
+
+Copy the defaults to get started:
+
+```bash
+mkdir -p ~/.config/sodalis/companions ~/.config/sodalis/prompts
+cp config/companions/default.yaml ~/.config/sodalis/companions/
+cp config/prompts/system_prompt.txt ~/.config/sodalis/prompts/
+```
+
+### Companion profile (`companions/default.yaml`)
+
+```yaml
+name: Default
+model: qwen3:8b
+system_prompt: system_prompt.txt
+temperature: 0.7
+```
+
+| Field | Description |
+|---|---|
+| `name` | Display name for the companion |
+| `model` | Ollama model to use |
+| `system_prompt` | Filename of the system prompt in `~/.config/sodalis/prompts/` |
+| `temperature` | Sampling temperature (0.0–1.0) |
+
+### System prompt (`prompts/system_prompt.txt`)
+
+Plain text file that sets the personality and behavior of the companion.
+
+---
+
+## Usage
+
+```bash
+sodalis --chat
+```
+
+Use a specific companion profile:
+
+```bash
+sodalis --chat --companion="MyCompanion"
+```
+
+---
+
+## Project Structure
+
+```
+sodalis/
+├── config/
+│   ├── companions/        # Default companion profiles
+│   └── prompts/           # Default system prompts
+├── src/sodalis/
+│   ├── cli.py             # Entry point and argument parsing
+│   ├── config.py          # Companion config loader
+│   ├── db.py              # SQLite database client
+│   ├── history.py         # Chat history handling
+│   ├── llm.py             # Ollama API client
+│   └── paths.py           # XDG-style path helpers
+├── docker-compose.yml     # Ollama service with GPU support
+└── pyproject.toml         # Project metadata and dependencies
+```
+
+---
+
+## Runtime Directories
+
+Sodalis follows XDG conventions:
+
+| Purpose | Path |
+|---|---|
+| Config & companions | `~/.config/sodalis/` |
+| Data (memory DB) | `~/.local/share/sodalis/` |
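
The project structure above names `src/sodalis/paths.py` as the XDG-style path helper module, and `cli.py` imports `ensure_runtime_dirs` and `get_prompts_dir` from it, but its code is not part of this diff. Below is a minimal sketch of how such helpers could resolve the two directories in the table, assuming the usual `XDG_CONFIG_HOME`/`XDG_DATA_HOME` fallbacks; the function bodies are guesses, not the actual implementation.

```python
# Sketch only: paths.py is not shown in this commit; the bodies below assume
# standard XDG environment-variable fallbacks for the README's directory table.
import os
from pathlib import Path

APP_NAME = "sodalis"


def get_config_dir() -> Path:
    # ~/.config/sodalis/ unless XDG_CONFIG_HOME points elsewhere
    base = Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config"))
    return base / APP_NAME


def get_prompts_dir() -> Path:
    return get_config_dir() / "prompts"


def get_data_dir() -> Path:
    # ~/.local/share/sodalis/ holds the memory database
    base = Path(os.environ.get("XDG_DATA_HOME", Path.home() / ".local" / "share"))
    return base / APP_NAME


def ensure_runtime_dirs() -> None:
    # Create the config, companion, prompt, and data directories if missing
    for path in (get_config_dir() / "companions", get_prompts_dir(), get_data_dir()):
        path.mkdir(parents=True, exist_ok=True)
```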

config/companions/default.yaml

Lines changed: 6 additions & 0 deletions
@@ -2,3 +2,9 @@ name: Default
 model: qwen3:8b
 system_prompt: system_prompt.txt
 temperature: 0.7
+
+tools:
+  enabled: true
+  timeout: 10
+  max_output: 4096
+  max_rounds: 3
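
The new `tools` block is consumed by the `cli.py` changes later in this commit: each field is read with `tools_cfg.get(...)` and forwarded to `generate_with_tools`. A short sketch of that lookup, using the same defaults as the CLI code; the meanings in the comments are inferred from the field names and the `generate_with_tools` signature, not documented anywhere in the diff.

```python
# Mirrors the .get() defaults used by run_chat/run_once in the cli.py diff below.
companion = {
    "name": "Default",
    "model": "qwen3:8b",
    "temperature": 0.7,
    "tools": {"enabled": True, "timeout": 10, "max_output": 4096, "max_rounds": 3},
}

tools_cfg = companion.get("tools", {})           # an absent block falls back to the defaults below
use_tools = tools_cfg.get("enabled", True)       # tool use is on unless explicitly disabled
max_rounds = tools_cfg.get("max_rounds", 3)      # inferred: model/tool round-trips per reply
timeout = tools_cfg.get("timeout", 10)           # inferred: time allowed per tool call
max_output = tools_cfg.get("max_output", 4096)   # inferred: cap on tool output fed back to the model
```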

config/prompts/system_prompt.txt

Lines changed: 2 additions & 1 deletion
@@ -1 +1,2 @@
-You are Sodalis, a local AI companion running on a Linux machine. Be practical, concise, and clear.
+You are a local AI companion running on a Linux machine. Be practical, concise, and clear.
+You can run read-only Linux commands when needed to answer questions. Use the tool format described below when you need to inspect the system.
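
The added line refers to a tool format "described below"; that text comes from `TOOL_SCHEMA_TEXT` in `sodalis.tools`, which `cli.py` appends to the system prompt but which is not included in this diff. Purely as a hedged illustration of the technique, a read-only tool loop like `generate_with_tools` might look roughly like the following; the `RUN:` request format, the allow-list, and the `run_command` helper are hypothetical stand-ins, not the schema Sodalis actually uses.

```python
# Hypothetical sketch of a read-only tool loop; the real request format lives in
# sodalis.tools.TOOL_SCHEMA_TEXT and may differ entirely.
import subprocess

from sodalis.llm import generate_response  # plain Ollama call, per the cli.py imports

ALLOWED = {"ls", "cat", "df", "uname", "free"}  # example read-only commands


def run_command(cmd: str, timeout: int, max_output: int) -> str:
    if not cmd or cmd.split()[0] not in ALLOWED:
        return "command not allowed"
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=timeout)
    return (result.stdout + result.stderr)[:max_output]  # truncate what the model sees


def generate_with_tools_sketch(prompt, model, temperature, *, max_rounds=3, timeout=10, max_output=4096):
    reply = ""
    for _ in range(max_rounds):
        reply = generate_response(prompt, model, temperature)
        if not reply.startswith("RUN:"):  # no tool request means this is the final answer
            return reply
        output = run_command(reply.removeprefix("RUN:").strip(), timeout, max_output)
        prompt += f"\n{reply}\nTOOL OUTPUT:\n{output}\nASSISTANT:"  # feed the result back
    return reply
```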

src/sodalis/cli.py

Lines changed: 183 additions & 18 deletions
@@ -2,48 +2,213 @@
 
 from sodalis.config import load_companion_config
 from sodalis.paths import ensure_runtime_dirs, get_prompts_dir
-from sodalis.llm import generate_response
+from sodalis.llm import generate_response, generate_with_tools
+from sodalis.db import init_db
+from sodalis.history import create_chat, delete_chat, list_chats, chat_exists, load_messages, save_message
+from sodalis.prompt_builder import build_prompt, load_prompt_layers
+from sodalis.tools import TOOL_SCHEMA_TEXT
 
 
-def load_system_prompt(filename: str) -> str:
-    prompt_path = get_prompts_dir() / filename
+def run_chat(companion, system_prompt, companion_prompt, chat_id=None, resume_chat_id=None, use_tools=True):
+    print(f"Sodalis {companion['name']} ({companion['model']})")
+
+    tools_cfg = companion.get("tools", {})
+
+    history = []
+    chat_id = None
 
-    if not prompt_path.exists():
-        raise FileNotFoundError(f"System prompt not found: {prompt_path}")
+    if resume_chat_id is not None:
+        if not chat_exists(resume_chat_id):
+            print(f"Chat {resume_chat_id} not found.")
+            return
 
-    return prompt_path.read_text(encoding="utf-8").strip()
+        chat_id = resume_chat_id
+        messages = load_messages(chat_id)
+        history = db_messages_to_prompt_history(messages)
+
+        print(f"Resuming chat {chat_id}.")
+
+    while True:
+        try:
+            user_input = input("> ").strip()
+
+            if user_input.lower() in {"exit", "quit"}:
+                print("Goodbye.")
+                break
+
+            if not user_input:
+                continue
+
+            if chat_id is None:
+                chat_id = create_chat(
+                    companion=companion["name"],
+                    model=companion["model"],
+                )
+            save_message(chat_id, "user", user_input)
+
+            prompt = build_prompt(system_prompt, companion_prompt, history, user_input)
+
+            if use_tools and tools_cfg.get("enabled", True):
+                response = generate_with_tools(
+                    prompt,
+                    companion["model"],
+                    companion["temperature"],
+                    max_rounds=tools_cfg.get("max_rounds", 3),
+                    timeout=tools_cfg.get("timeout", 10),
+                    max_output=tools_cfg.get("max_output", 4096),
+                )
+            else:
+                response = generate_response(
+                    prompt,
+                    companion["model"],
+                    companion["temperature"]
+                )
+
+            print(response)
+
+            save_message(chat_id, "assistant", response)
+
+            history.append({"role": "user", "content": user_input})
+            history.append({"role": "assistant", "content": response})
+
+        except KeyboardInterrupt:
+            print("\nExiting.")
+            break
+
+
+def run_once(companion, system_prompt, message, use_tools=True):
+
+    tools_cfg = companion.get("tools", {})
+    full_prompt = f"{system_prompt}\n\nUSER: {message}\nASSISTANT:"
+
+    if use_tools and tools_cfg.get("enabled", True):
+        response = generate_with_tools(
+            prompt=full_prompt,
+            model=companion["model"],
+            temperature=companion["temperature"],
+            max_rounds=tools_cfg.get("max_rounds", 3),
+            timeout=tools_cfg.get("timeout", 10),
+            max_output=tools_cfg.get("max_output", 4096),
+        )
+    else:
+        response = generate_response(
+            prompt=full_prompt,
+            model=companion["model"],
+            temperature=companion["temperature"],
+        )
+
+    print(response)
+
+
+def print_history_table(chats):
+    if not chats:
+        print("No saved chats found.")
+        return
+
+    print(f"{'ID':<6} {'MODEL':<15} {'TITLE':<50} {'CREATED'}")
+    print("-" * 95)
+
+    for chat_id, model, title, created_at in chats:
+        title = (title or "")[:50]
+        print(f"{chat_id:<6} {model:<15} {title:<50} {created_at}")
+
+
+def confirm_delete(chat_id: int) -> bool:
+    answer = input(f"Are you sure you want to delete chat {chat_id}? [y/N]: ").strip().lower()
+    return answer in {"y", "yes"}
+
+
+def db_messages_to_prompt_history(messages: tuple):
+    history = []
+
+    for role, content in messages:
+        if role == "user":
+            prompt_role = "USER"
+        elif role == "assistant":
+            prompt_role = "ASSISTANT"
+        else:
+            continue
+
+        history.append({"role": prompt_role, "content": content})
+
+    return history
 
 
 def main() -> None:
+    init_db()
+
     parser = argparse.ArgumentParser(prog="sodalis")
     parser.add_argument(
         "message",
+        nargs="?",
         help="Message to send to the companion",
     )
     parser.add_argument(
         "--companion",
         default="default",
         help="Name of the companion profile to load",
     )
-    args = parser.parse_args()
+    parser.add_argument(
+        "--chat",
+        nargs="?",
+        const="new",
+        metavar="CHAT_ID",
+        help="Start a new chat or resume an existing chat by ID"
+    )
+    parser.add_argument(
+        "--history",
+        action="store_true",
+        help="Show saved chat history",
+    )
+    parser.add_argument(
+        "--delete",
+        type=int,
+        metavar="CHAT_ID",
+        help="Delete a saved chat by ID",
+    )
+    parser.add_argument(
+        "--no-tools",
+        action="store_true",
+        help="Disable tool use for this invocation",
+    )
 
+    args = parser.parse_args()
     ensure_runtime_dirs()
+
+    companion = load_companion_config(args.companion)
+    companion_name = companion.get("name", args.companion)
+    system_prompt, companion_prompt = load_prompt_layers(companion)
 
-    config = load_companion_config(args.companion)
-    companion_name = config.get("name", args.companion)
+    use_tools = not args.no_tools
+    tools_cfg = companion.get("tools", {})
+    if use_tools and tools_cfg.get("enabled", True):
+        system_prompt = system_prompt + "\n\n" + TOOL_SCHEMA_TEXT
+
+    if args.history:
+        chats = list_chats()
+        print_history_table(chats)
+        return
+
+    if args.delete is not None:
+        if not confirm_delete(args.delete):
+            print("Delete cancelled.")
+            return
 
-    system_prompt = load_system_prompt(config["system_prompt"])
+        deleted = delete_chat(args.delete)
 
-    full_prompt = f"{system_prompt}\n\nUser: {args.message}\nAssistant:"
+        if deleted:
+            print(f"Chat {args.delete} deleted.")
+        else:
+            print(f"Chat {args.delete} not found.")
 
-    response = generate_response(
-        prompt=full_prompt,
-        model=config["model"],
-        temperature=config["temperature"],
-    )
-
-    print(response)
+        return
 
+    if args.chat is not None:
+        if args.chat == "new":
+            run_chat(companion, system_prompt, companion_prompt, use_tools=use_tools)
+        else:
+            run_chat(companion, system_prompt, companion_prompt, resume_chat_id=int(args.chat), use_tools=use_tools)
+
 
 if __name__ == "__main__":
     main()
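
`run_chat` delegates prompt assembly to `build_prompt` from the new `prompt_builder` module, which is not part of this diff. Judging from the `USER:`/`ASSISTANT:` string format in `run_once` and the uppercase roles produced by `db_messages_to_prompt_history`, it plausibly flattens the prompt layers and chat history into a single string; the following is a guess at the shape, not the actual implementation.

```python
# Assumed shape of prompt_builder.build_prompt, inferred from run_once's
# "USER:/ASSISTANT:" format and the uppercase roles in the history entries.
def build_prompt(system_prompt: str, companion_prompt: str, history: list, user_input: str) -> str:
    parts = [system_prompt]
    if companion_prompt:
        parts.append(companion_prompt)
    for turn in history:
        parts.append(f"{turn['role']}: {turn['content']}")  # e.g. "USER: hi"
    parts.append(f"USER: {user_input}")
    parts.append("ASSISTANT:")  # the model completes from here
    return "\n\n".join(parts)
```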

src/sodalis/config.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def load_companion_config(name: str = "default") -> dict:
         raise ValueError(f"Invalid companion config in {config_path}: top-level YAML must be a mapping")
 
     # YAML validation
-    required_fields = ["name", "model", "system_prompt", "temperature"]
+    required_fields = ["name", "model", "companion_prompt", "temperature"]
     for field in required_fields:
         if field not in data:
             raise ValueError(f"Missing required field '{field}' in {config_path}")
