forked from nearai/ironclaw
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
126 lines (103 loc) · 4.67 KB
/
.env.example
File metadata and controls
126 lines (103 loc) · 4.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# Database Configuration
DATABASE_URL=postgres://localhost/ironclaw
DATABASE_POOL_SIZE=10
# LLM Provider
# LLM_BACKEND=nearai # default
# Possible values: nearai, ollama, openai_compatible, openai, anthropic, tinfoil
# === NEAR AI (Chat Completions API) ===
# Two auth modes:
#   1. Session token (default): uses browser OAuth (GitHub/Google) on first run.
#      The session token is stored in ~/.ironclaw/session.json automatically.
#      Base URL defaults to https://private.near.ai
#   2. API key: set NEARAI_API_KEY to use API-key auth from cloud.near.ai.
#      Base URL defaults to https://cloud-api.near.ai
NEARAI_MODEL=zai-org/GLM-5-FP8
NEARAI_BASE_URL=https://private.near.ai
NEARAI_AUTH_URL=https://private.near.ai
# NEARAI_SESSION_TOKEN=sess_... # hosting providers: set this
# NEARAI_SESSION_PATH=~/.ironclaw/session.json # optional, default shown
# NEARAI_API_KEY=... # API key from cloud.near.ai
# Local LLM Providers (Ollama, LM Studio, vLLM, LiteLLM)
# === Ollama ===
# OLLAMA_MODEL=llama3.2
# LLM_BACKEND=ollama
# OLLAMA_BASE_URL=http://localhost:11434 # default
# === OpenAI-compatible (LM Studio, vLLM, Anything-LLM) ===
# LLM_MODEL=llama-3.2-3b-instruct-q4_K_M
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=http://localhost:1234/v1
# LLM_API_KEY=sk-... # optional for local servers
# Custom HTTP headers for OpenAI-compatible providers
# Format: comma-separated key:value pairs
# LLM_EXTRA_HEADERS=HTTP-Referer:https://github.com/nearai/ironclaw,X-Title:ironclaw
# === OpenRouter (300+ models via OpenAI-compatible) ===
# LLM_MODEL=anthropic/claude-sonnet-4 # see openrouter.ai/models for IDs
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://openrouter.ai/api/v1
# LLM_API_KEY=sk-or-...
# LLM_EXTRA_HEADERS=HTTP-Referer:https://myapp.com,X-Title:MyApp
# === Together AI (via OpenAI-compatible) ===
# LLM_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://api.together.xyz/v1
# LLM_API_KEY=...
# === Fireworks AI (via OpenAI-compatible) ===
# LLM_MODEL=accounts/fireworks/models/llama4-maverick-instruct-basic
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://api.fireworks.ai/inference/v1
# LLM_API_KEY=fw_...
# For full provider setup guide see docs/LLM_PROVIDERS.md
# Channel Configuration
# CLI is always enabled
# Slack Bot (optional)
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
SLACK_SIGNING_SECRET=...
# Telegram Bot (optional)
TELEGRAM_BOT_TOKEN=...
# HTTP Webhook Server (optional)
HTTP_HOST=0.0.0.0
HTTP_PORT=8080
HTTP_WEBHOOK_SECRET=your-webhook-secret
# Signal Channel (optional, requires signal-cli daemon --http)
# SIGNAL_HTTP_URL=http://127.0.0.1:8080
# SIGNAL_ACCOUNT=+1234567890
# SIGNAL_ALLOW_FROM=+1234567890,uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx # comma-separated, * for all, empty = deny/require pairing
# SIGNAL_ALLOW_FROM_GROUPS= # comma-separated group IDs, * for all, empty = deny all groups
# SIGNAL_DM_POLICY=pairing # open | allowlist | pairing
# SIGNAL_GROUP_POLICY=allowlist # allowlist | open | disabled
# SIGNAL_GROUP_ALLOW_FROM= # comma-separated, empty = inherit from ALLOW_FROM
# SIGNAL_IGNORE_ATTACHMENTS=false
# SIGNAL_IGNORE_STORIES=true
# Agent Settings
AGENT_NAME=ironclaw
AGENT_MAX_PARALLEL_JOBS=5
AGENT_JOB_TIMEOUT_SECS=3600
AGENT_STUCK_THRESHOLD_SECS=300
# Enable planning phase before tool execution (default: true)
AGENT_USE_PLANNING=true
# Self-repair settings
SELF_REPAIR_CHECK_INTERVAL_SECS=60
SELF_REPAIR_MAX_ATTEMPTS=3
# Heartbeat settings (proactive periodic execution)
# When enabled, the agent reads the HEARTBEAT.md checklist on each tick and reports its findings
HEARTBEAT_ENABLED=false
HEARTBEAT_INTERVAL_SECS=1800
HEARTBEAT_NOTIFY_CHANNEL=cli
HEARTBEAT_NOTIFY_USER=default
# Memory hygiene settings (automatic cleanup of stale workspace documents)
# Runs on each heartbeat tick; identity files (IDENTITY.md, SOUL.md) are never deleted
# MEMORY_HYGIENE_ENABLED=true
# MEMORY_HYGIENE_RETENTION_DAYS=30 # delete daily/ docs older than this many days
# MEMORY_HYGIENE_CADENCE_HOURS=12 # minimum hours between cleanup passes
# Safety settings
SAFETY_MAX_OUTPUT_LENGTH=100000
SAFETY_INJECTION_CHECK_ENABLED=true
# Restart Feature (Docker containers only)
# Set IRONCLAW_IN_DOCKER=true in the container entrypoint to enable the restart feature.
# Without this, the restart tool and /restart command will be disabled.
# IRONCLAW_IN_DOCKER=false
# IRONCLAW_RESTART_DELAY=5 # default wait before exit (seconds, range: 1-30)
# IRONCLAW_MAX_FAILURES=10 # max consecutive failures before container exits
# Logging
RUST_LOG=ironclaw=debug,tower_http=debug