-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmemory-layer.toml.example
More file actions
74 lines (65 loc) · 2.09 KB
/
memory-layer.toml.example
File metadata and controls
74 lines (65 loc) · 2.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
# Shared Memory Layer defaults and secrets.
# Repo-local overrides should live in .mem/config.toml inside each project.

[service]
# Listen address for the service; loopback-only by default.
bind_addr = "127.0.0.1:4040"
# Cap'n Proto endpoints: a Unix socket for same-host clients and a TCP
# address alongside it.
capnp_unix_socket = "/tmp/memory-layer.capnp.sock"
capnp_tcp_addr = "127.0.0.1:4041"
# Optional override for the bundled web assets.
# web_root = "/usr/share/memory-layer/web"
# The service API token is provisioned automatically into memory-layer.env.
request_timeout = "30s"

[cluster]
enabled = true
# Optional stable identifier; empty means Memory Layer derives one from bind_addr.
service_id = ""
# Set this when other machines should reach this service on a LAN address.
# advertise_addr = "10.22.6.42:4040"
# Peers discover each other via announcements on this multicast group.
discovery_multicast_addr = "239.255.42.99:4042"
# How often this node announces itself, and how long a silent peer is retained.
announce_interval = "5s"
peer_ttl = "15s"
# NOTE(review): ordering semantics (higher vs. lower wins) are not documented
# here — confirm against the service before tuning.
priority = 100

[database]
# Replace <password> with the real credential before use; this example file
# is not a working configuration as-is.
url = "postgresql://memory:<password>@localhost:5432/memory"

# Optional shared writer label. If omitted, Memory Layer derives a stable writer id
# automatically from the tool, local user, and host name.
# [writer]
# id = "codex-cli-main"
# # optional display name stored alongside captures
# # name = "Codex CLI"

[features]
# Presumably gates the [llm] settings below — verify before enabling.
llm_curation = false

[llm]
# Use "ollama" for a local Ollama server at http://127.0.0.1:11434/v1.
provider = "openai_compatible"
base_url = "https://api.openai.com/v1"
# Name of the environment variable that holds the API key — not the key itself.
api_key_env = "OPENAI_API_KEY"
# Empty by default; set a model name before enabling LLM curation
# (TODO confirm whether the service supplies a fallback).
model = ""
# 0.0 requests deterministic sampling from the provider.
temperature = 0.0
# Limits on request size (bytes in) and response size (tokens out).
max_input_bytes = 120_000
max_output_tokens = 3000

[embeddings]
provider = "openai"
base_url = "https://api.openai.com/v1"
# Name of the environment variable that holds the API key — not the key itself.
api_key_env = "OPENAI_API_KEY"
# Empty by default; set an embedding model name before use.
model = ""
# Texts embedded per request — presumably an API batching knob; confirm limits
# against the chosen provider.
batch_size = 16

# Local Ollama embedding example:
# [[embeddings.backends]]
# name = "ollama-nomic"
# provider = "ollama"
# base_url = "http://127.0.0.1:11434/v1"
# api_key_env = ""
# model = "nomic-embed-text"

[automation]
enabled = false
# "suggest" is the only mode shown here — TODO confirm what other modes exist
# (e.g. an auto-apply mode) before changing.
mode = "suggest"
poll_interval = "10s"
# Idle time before a capture is considered, and the minimum change size
# that qualifies — NOTE(review): exact trigger semantics not documented here.
capture_idle_threshold = "10m"
min_changed_files = 2
require_passing_test = false
# Curation cadence: after N captures, and always on an explicit flush.
curate_after_captures = 3
curate_on_explicit_flush = true
# Trailing slashes keep these entries directory-scoped.
ignored_paths = [".git/", "target/", ".memory-layer/"]
# repo_root = "/path/to/repo"
# audit_log_path = "/path/to/repo/.memory-layer/automation.log"
# state_file_path = "/path/to/repo/.memory-layer/automation-state.json"