# .env.template — environment variable template (forked from potpie-ai/potpie)
# Copy to .env and fill in values; lines starting with # are comments.
# App & Environment
APP_NAME=potpie-ai
APP_VERSION=0.1.0
# One of: development | staging | production
ENV=development
isDevelopmentMode=enabled
STRICT_ENV_VALIDATION=true
# Logging & Observability
# One of: debug | info | warn | error
LOG_LEVEL=debug
LOGFIRE_ENABLED=true
# NOTE: LOGFIRE_TOKEN and LOGFIRE_PROJECT_NAME are redefined in the
# "Logfire Tracing Configuration" section near the end of this file.
# Set them there — most dotenv loaders are last-wins, so the later
# definitions take effect anyway. Kept commented here to avoid duplicates.
# LOGFIRE_TOKEN=
# LOGFIRE_PROJECT_NAME=potpie-ai
TRACE_SAMPLE_RATE=0.1
# AI / LLM Configuration
# One of: openai | ollama | anthropic | openrouter
LLM_PROVIDER=openai
OPENAI_API_KEY=
# NOTE: CHAT_MODEL, INFERENCE_MODEL, LLM_API_BASE, LLM_API_VERSION and the
# LLM_SUPPORTS_* flags are redefined later in this file with different values.
# Most dotenv loaders are last-wins, so the later definitions take effect;
# the earlier copies are left commented out here so each key has exactly one
# active definition.
# CHAT_MODEL=gpt-4o
# INFERENCE_MODEL=gpt-4o-mini
LLM_TEMPERATURE=0.2
LLM_MAX_TOKENS=4096
# Request timeout in seconds
LLM_TIMEOUT=60
# Optional OpenAI-compatible overrides (active definitions appear later)
# LLM_API_BASE=
# LLM_API_VERSION=
# Capability overrides, true / false (active definitions appear later)
# LLM_SUPPORTS_PYDANTIC=false
# LLM_SUPPORTS_STREAMING=true
# LLM_SUPPORTS_VISION=false
# LLM_SUPPORTS_TOOL_PARALLELISM=false
# Feature Flags
ENABLE_STREAMING=true
ENABLE_RAG=true
ENABLE_TOOLS=true
ENABLE_CODE_ANALYSIS=true
# Database Configuration
POSTGRES_PASSWORD=your_password_here
# NOTE(review): ${POSTGRES_PASSWORD} expansion only works if the consuming
# dotenv loader supports variable interpolation; plain loaders keep it as a
# literal string — confirm against the app's config loader, or inline the
# password here.
POSTGRES_SERVER=postgresql://postgres:${POSTGRES_PASSWORD}@localhost:5432/momentum_dev
NEO4J_URI=bolt://127.0.0.1:7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=
# Redis & Background Jobs
REDISHOST=127.0.0.1
REDISPORT=6379
REDIS_DB=0
BROKER_URL=redis://127.0.0.1:6379/0
CELERY_QUEUE_NAME=dev
defaultUsername=defaultuser
# Repositories will be downloaded/cloned to this path on your system.
PROJECT_PATH=projects
INFERENCE_MODEL=openai/gpt-4.1-mini
CHAT_MODEL=openai/gpt-4o
# Optional overrides for OpenAI-compatible endpoints (e.g., Azure, Ollama, Mistral)
LLM_API_BASE=
LLM_API_VERSION=
# Optional capability overrides for custom providers
# Set to "true"/"false" (1/0 also accepted) to force specific behaviour
LLM_SUPPORTS_PYDANTIC=
LLM_SUPPORTS_STREAMING=
LLM_SUPPORTS_VISION=
LLM_SUPPORTS_TOOL_PARALLELISM=
# following are for production mode
GCP_PROJECT=
GCP_SECRET_MANAGER_DISABLED=false
# Context and history management (Phase 2: token- and model-aware limits)
# HISTORY_TOKEN_BUDGET: max tokens for history when model context unknown (default: 30000)
# HISTORY_TOKEN_BUDGET_RATIO: fraction of model context window for history (default: 0.75)
# HISTORY_MESSAGE_CAP: max messages in ctx.history when using message-count proxy (default: 50)
# HISTORY_TOKEN_BUDGET=
# HISTORY_TOKEN_BUDGET_RATIO=0.75
# HISTORY_MESSAGE_CAP=50
# Phase 3: Persist compressed history per conversation (cross-request)
# CONTEXT_MANAGEMENT_USE_PERSISTED_COMPRESSED_HISTORY: enable conversation-scoped store (default: true)
# COMPRESSED_HISTORY_TTL_SECONDS: TTL for in-memory entries (default: 86400)
# COMPRESSED_HISTORY_MAX_CONVERSATIONS: max entries in memory store (default: 500)
# COMPRESSED_HISTORY_STORE_BACKEND: "memory" (Phase 3a) or "redis" (Phase 3b, optional)
# CONTEXT_MANAGEMENT_USE_PERSISTED_COMPRESSED_HISTORY=true
# COMPRESSED_HISTORY_TTL_SECONDS=86400
# COMPRESSED_HISTORY_MAX_CONVERSATIONS=500
# COMPRESSED_HISTORY_STORE_BACKEND=memory
# Phase 4: Optional LLM summarization when still over limit after compaction
# CONTEXT_MANAGEMENT_SUMMARIZATION_ENABLED: enable middle-segment summarization (default: false)
# CONTEXT_MANAGEMENT_SUMMARIZATION_MODEL: optional model for summarization (e.g. cheaper); unset = same/cheaper from factory
# SUMMARIZATION_HEAD_MESSAGES: messages to keep at start (default: 2)
# SUMMARIZATION_TAIL_MESSAGES: messages to keep at end (default: 6)
# SUMMARIZATION_TARGET_TOKENS: target tokens for summary (default: 10000)
# CONTEXT_MANAGEMENT_SUMMARIZATION_ENABLED=false
# CONTEXT_MANAGEMENT_SUMMARIZATION_MODEL=
# SUMMARIZATION_HEAD_MESSAGES=2
# SUMMARIZATION_TAIL_MESSAGES=6
# SUMMARIZATION_TARGET_TOKENS=10000
# Multimodal Feature Flag
# Controls availability of image upload and multimodal AI functionality
# Possible values:
# "auto" (default) - Enable automatically when all GCP vars present
# "enabled" - Force enable (requires GCP vars)
# "disabled" - Force disable regardless of GCP vars
isMultimodalEnabled=auto
# Required for multimodal functionality (when enabled)
# Storage provider selection: "s3", "gcs", "azure", or "auto" (default: auto)
OBJECT_STORAGE_PROVIDER=gcs
# GCS config
GCS_PROJECT_ID=
GCS_BUCKET_NAME=
GCS_HMAC_ACCESS_KEY=
GCS_HMAC_SECRET_KEY=
# S3
S3_BUCKET_NAME=
AWS_REGION=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
# Email & Notifications
EMAIL_ENABLED=false
EMAIL_PROVIDER=resend
EMAIL_FROM_ADDRESS=
RESEND_API_KEY=
TRANSACTION_EMAILS_ENABLED=
# Analytics & Integrations
POSTHOG_API_KEY=
POSTHOG_HOST=
FIRECRAWL_API_KEY=
ANTHROPIC_API_KEY=
OPENROUTER_API_KEY=
# Logfire Tracing Configuration
# Set LOGFIRE_SEND_TO_CLOUD=false to disable sending traces to Logfire cloud
LOGFIRE_SEND_TO_CLOUD=true
# Logfire API token (get from https://logfire.pydantic.dev)
LOGFIRE_TOKEN=
# Project name shown in Logfire UI (optional)
LOGFIRE_PROJECT_NAME=potpie-ai
# Service name for resource attributes (default: project or "potpie")
# LOGFIRE_SERVICE_NAME=potpie
# Environment for traces (e.g. development, staging, production). Also set via ENV.
# LOGFIRE_ENVIRONMENT=development
# GitHub Authentication Configuration
# GH_TOKEN_LIST: Personal Access Tokens for GitHub.com (comma-separated for token pool)
# GITHUB_APP_ID + GITHUB_PRIVATE_KEY: GitHub App credentials (recommended for production)
# CODE_PROVIDER_TOKEN: Token for self-hosted Git servers (GitBucket, GitLab, etc.)
# CODE_PROVIDER_BASE_URL: API base URL for self-hosted Git servers
# Optional: Git provider configuration for self-hosted instances
# Supported providers: github, gitlab, gitbucket, bitbucket, local
CODE_PROVIDER=github
# e.g., http://localhost:8080/api/v3 for GitBucket, /path/to/repo for local
CODE_PROVIDER_BASE_URL=
# PAT for self-hosted Git server (not needed for local)
CODE_PROVIDER_TOKEN=
# For local provider:
# CODE_PROVIDER=local
# CODE_PROVIDER_BASE_URL=/path/to/local/repository
# For GitHub:
# CODE_PROVIDER=github
# CODE_PROVIDER_BASE_URL=https://api.github.com # Optional, has default
# CODE_PROVIDER_TOKEN=ghp_xxxxx
# For GitBucket:
# CODE_PROVIDER=gitbucket
# CODE_PROVIDER_BASE_URL=http://localhost:8080/api/v3 # Required
# CODE_PROVIDER_TOKEN=your_token
# Testing
PRIVATE_TEST_REPO_NAME=<yourGithubUsername>/potpie-private-test-repo
# Socket.IO Workspace Tunnel (always enabled at /ws)
# Extension connects via WebSocket to /ws, registers workspace; agent tool calls go over Socket.IO.
# WORKSPACE_SOCKET_TTL=300
# WORKSPACE_TOOL_CALL_TIMEOUT=30