# .env.example
# Database Configuration
DATABASE_URL=postgres://localhost/ironclaw
DATABASE_POOL_SIZE=10
# LLM Provider
# LLM_BACKEND=nearai # default
# Possible values: nearai, ollama, openai_compatible, openai, anthropic, tinfoil
# LLM_REQUEST_TIMEOUT_SECS=120 # Increase for local LLMs (Ollama, vLLM, LM Studio)
# === Anthropic Direct ===
# Two auth modes:
# 1. API key: Set ANTHROPIC_API_KEY (from console.anthropic.com/settings/keys)
# 2. OAuth token: Set ANTHROPIC_OAUTH_TOKEN (from `claude login`)
# OAuth tokens use Authorization: Bearer instead of x-api-key header.
# ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_OAUTH_TOKEN=sk-ant-oat01-... # from `claude login` credentials
# ANTHROPIC_MODEL=claude-sonnet-4-20250514
# === OpenAI Direct ===
# OPENAI_API_KEY=sk-...
# === NEAR AI (Chat Completions API) ===
# Two auth modes:
# 1. Session token (default): Uses browser OAuth (GitHub/Google) on first run.
# Session token stored in ~/.ironclaw/session.json automatically.
# Base URL defaults to https://private.near.ai
# 2. API key: Set NEARAI_API_KEY to use API key auth from cloud.near.ai.
# Base URL defaults to https://cloud-api.near.ai
NEARAI_MODEL=zai-org/GLM-5-FP8
NEARAI_BASE_URL=https://private.near.ai
NEARAI_AUTH_URL=https://private.near.ai
# NEARAI_SESSION_TOKEN=sess_... # hosting providers: set this
# NEARAI_SESSION_PATH=~/.ironclaw/session.json # optional, default shown
# NEARAI_API_KEY=... # API key from cloud.near.ai
# Local LLM Providers (Ollama, LM Studio, vLLM, LiteLLM)
# === Ollama ===
# OLLAMA_MODEL=llama3.2
# LLM_BACKEND=ollama
# OLLAMA_BASE_URL=http://localhost:11434 # default
# === OpenAI-compatible (LM Studio, vLLM, Anything-LLM) ===
# LLM_MODEL=llama-3.2-3b-instruct-q4_K_M
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=http://localhost:1234/v1
# LLM_API_KEY=sk-... # optional for local servers
# Custom HTTP headers for OpenAI-compatible providers
# Format: comma-separated key:value pairs
# LLM_EXTRA_HEADERS=HTTP-Referer:https://github.com/nearai/ironclaw,X-Title:ironclaw
# === OpenRouter (300+ models via OpenAI-compatible) ===
# LLM_MODEL=anthropic/claude-sonnet-4 # see openrouter.ai/models for IDs
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://openrouter.ai/api/v1
# LLM_API_KEY=sk-or-...
# LLM_EXTRA_HEADERS=HTTP-Referer:https://myapp.com,X-Title:MyApp
# === Together AI (via OpenAI-compatible) ===
# LLM_MODEL=meta-llama/Llama-3.3-70B-Instruct-Turbo
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://api.together.xyz/v1
# LLM_API_KEY=...
# === Fireworks AI (via OpenAI-compatible) ===
# LLM_MODEL=accounts/fireworks/models/llama4-maverick-instruct-basic
# LLM_BACKEND=openai_compatible
# LLM_BASE_URL=https://api.fireworks.ai/inference/v1
# LLM_API_KEY=fw_...
# === MiniMax ===
# LLM_BACKEND=minimax
# MINIMAX_API_KEY=...
# MINIMAX_MODEL=MiniMax-M2.5
# MINIMAX_BASE_URL=https://api.minimax.io/v1 # default (global); use https://api.minimaxi.com/v1 for China
# === Anthropic Direct ===
# LLM_BACKEND=anthropic
# ANTHROPIC_MODEL=claude-sonnet-4-6
# ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_BASE_URL=https://api.anthropic.com # default
# Prompt cache retention — controls Anthropic server-side prompt caching:
# none = disabled (no cache_control injected)
#   short = 5-minute TTL, 1.25x write cost (25% surcharge over base input price) (default)
#   long  = 1-hour TTL, 2.0x write cost (100% surcharge over base input price)
# ANTHROPIC_CACHE_RETENTION=short
# For full provider setup guide see docs/LLM_PROVIDERS.md
# Channel Configuration
# CLI is always enabled
# Slack Bot (optional)
SLACK_BOT_TOKEN=xoxb-...
SLACK_APP_TOKEN=xapp-...
SLACK_SIGNING_SECRET=...
# Telegram Bot (optional)
TELEGRAM_BOT_TOKEN=...
# HTTP Webhook Server (optional)
HTTP_HOST=0.0.0.0
HTTP_PORT=8080
HTTP_WEBHOOK_SECRET=your-webhook-secret
# Webhook authentication uses HMAC-SHA256 signature verification.
# Callers must send an X-IronClaw-Signature header with format: sha256=<hex_digest>
# where the digest is HMAC-SHA256(HTTP_WEBHOOK_SECRET, raw_request_body) in lowercase hex.
#
# Example (bash):
# BODY='{"content":"hello"}'
# SIG=$(echo -n "$BODY" | openssl dgst -sha256 -hmac "$HTTP_WEBHOOK_SECRET" | cut -d' ' -f2)
# curl -X POST http://localhost:8080/webhook \
# -H "Content-Type: application/json" \
# -H "X-IronClaw-Signature: sha256=$SIG" \
# -d "$BODY"
#
# DEPRECATED: Passing "secret" in the JSON body still works but will be removed in a future release.
# Signal Channel (optional, requires signal-cli daemon --http)
# SIGNAL_HTTP_URL=http://127.0.0.1:8080 # note: pick a different port if the HTTP webhook server is enabled (HTTP_PORT=8080 above)
# SIGNAL_ACCOUNT=+1234567890
# SIGNAL_ALLOW_FROM=+1234567890,uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx # comma-separated, * for all, empty = deny/require pairing
# SIGNAL_ALLOW_FROM_GROUPS= # comma-separated group IDs, * for all, empty = deny all groups
# SIGNAL_DM_POLICY=pairing # open | allowlist | pairing
# SIGNAL_GROUP_POLICY=allowlist # allowlist | open | disabled
# SIGNAL_GROUP_ALLOW_FROM= # comma-separated, empty = inherit from ALLOW_FROM
# SIGNAL_IGNORE_ATTACHMENTS=false
# SIGNAL_IGNORE_STORIES=true
# Agent Settings
AGENT_NAME=ironclaw
AGENT_MAX_PARALLEL_JOBS=5
AGENT_JOB_TIMEOUT_SECS=3600
AGENT_STUCK_THRESHOLD_SECS=300
# Maximum tokens per job (0 = unlimited, also settable via settings.json agent.max_tokens_per_job)
# AGENT_MAX_TOKENS_PER_JOB=0
# Enable planning phase before tool execution (default: true)
AGENT_USE_PLANNING=true
# Self-repair settings
SELF_REPAIR_CHECK_INTERVAL_SECS=60
SELF_REPAIR_MAX_ATTEMPTS=3
# Heartbeat settings (proactive periodic execution)
# When enabled, reads HEARTBEAT.md checklist and reports findings
HEARTBEAT_ENABLED=false
HEARTBEAT_INTERVAL_SECS=1800
HEARTBEAT_NOTIFY_CHANNEL=cli
HEARTBEAT_NOTIFY_USER=default
# Memory hygiene settings (automatic cleanup of stale workspace documents)
# Runs on each heartbeat tick; identity files (IDENTITY.md, SOUL.md) are never deleted
# MEMORY_HYGIENE_ENABLED=true
# MEMORY_HYGIENE_DAILY_RETENTION_DAYS=30 # delete daily/ docs older than this many days
# MEMORY_HYGIENE_CONVERSATION_RETENTION_DAYS=7 # delete conversations/ docs older than this many days
# MEMORY_HYGIENE_CADENCE_HOURS=12 # minimum hours between cleanup passes
# Docker Sandbox
# SANDBOX_ENABLED=true
# SANDBOX_POLICY=readonly # readonly, workspace_write, or full_access
# SANDBOX_ALLOW_FULL_ACCESS=false # REQUIRED second opt-in for full_access policy.
# # FullAccess bypasses Docker entirely and runs
# # commands directly on the host. Without this
# # set to "true", full_access is downgraded to
# # workspace_write.
# SANDBOX_IMAGE=ironclaw-worker:latest
# SANDBOX_TIMEOUT_SECS=120
# SANDBOX_MEMORY_LIMIT_MB=2048
# Safety settings
SAFETY_MAX_OUTPUT_LENGTH=100000
SAFETY_INJECTION_CHECK_ENABLED=true
# Restart Feature (Docker containers only)
# Set IRONCLAW_IN_DOCKER=true in the container entrypoint to enable the restart feature.
# Without this, the restart tool and /restart command will be disabled.
# IRONCLAW_IN_DOCKER=false
# IRONCLAW_RESTART_DELAY=5 # default wait before exit (seconds, range: 1-30)
# IRONCLAW_MAX_FAILURES=10 # max consecutive failures before container exits
# Logging
RUST_LOG=ironclaw=debug,tower_http=debug