-
Notifications
You must be signed in to change notification settings - Fork 1.4k
Expand file tree
/
Copy path.env.example
More file actions
128 lines (93 loc) · 4.24 KB
/
.env.example
File metadata and controls
128 lines (93 loc) · 4.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
# ==============================================================================
# DeepTutor Environment Configuration
# ==============================================================================
# Copy this file to `.env` and fill in the values.
# Required fields are marked with [Required], optional fields with [Optional].
# ==============================================================================
# Server Ports
# ==============================================================================
# [Optional] Backend API server port
BACKEND_PORT=8001
# [Optional] Frontend server port
FRONTEND_PORT=3782
# ==============================================================================
# LLM Configuration (Large Language Model)
# ==============================================================================
# Primary LLM for all AI operations (chat, research, solve, etc.)
# [Required] Provider binding: openai, azure_openai, anthropic,
#            deepseek, openrouter, groq, together, mistral,
#            ollama, lm_studio, vllm, llama_cpp
LLM_BINDING=openai
# [Required] Model name (e.g., gpt-4o, deepseek-chat, claude-3-5-sonnet)
LLM_MODEL=gpt-4o
# [Required] API key for the LLM provider
LLM_API_KEY=sk-xxx
# [Required] API endpoint URL
LLM_HOST=https://api.openai.com/v1
# [Optional] API version (required for Azure OpenAI)
LLM_API_VERSION=
# ==============================================================================
# Embedding Configuration
# ==============================================================================
# Embedding model for RAG (Retrieval-Augmented Generation)
# [Required] Provider: openai, azure_openai, jina,
# cohere, huggingface, google, ollama, lm_studio
EMBEDDING_BINDING=openai
# [Required] Model name
EMBEDDING_MODEL=text-embedding-3-small
# [Required] API key
EMBEDDING_API_KEY=sk-xxx
# [Required] API endpoint URL
EMBEDDING_HOST=https://api.openai.com/v1
# [Required] Vector dimensions (must match model output;
# text-embedding-3-small = 1536, text-embedding-3-large = 3072)
EMBEDDING_DIMENSION=1536
# [Optional] API version (for Azure OpenAI)
EMBEDDING_API_VERSION=
# ==============================================================================
# TTS Configuration (Text-to-Speech)
# ==============================================================================
# Optional: Enable audio narration features
# [Optional] Provider: openai, azure_openai
TTS_BINDING=openai
# [Optional] TTS model name
TTS_MODEL=tts-1
# [Optional] API key (can be same as LLM_API_KEY for OpenAI)
TTS_API_KEY=sk-xxx
# [Optional] API endpoint URL
TTS_URL=https://api.openai.com/v1
# [Optional] Voice: alloy, echo, fable, onyx, nova, shimmer
TTS_VOICE=alloy
# [Optional] API version (for Azure OpenAI)
TTS_BINDING_API_VERSION=
# ==============================================================================
# Search Configuration (Web Search)
# ==============================================================================
# Optional: Enable web search capabilities
# [Optional] Provider: perplexity, tavily, serper, jina, exa
SEARCH_PROVIDER=perplexity
# [Optional] API key for your chosen search provider
SEARCH_API_KEY=pplx-xxx
# ==============================================================================
# Cloud Deployment Configuration
# ==============================================================================
# Required when deploying to cloud/remote servers
# [Optional] External API base URL for cloud deployment
# Set this to your server's public URL when deploying remotely
# Example: https://your-server.com:8001 or https://api.yourdomain.com
NEXT_PUBLIC_API_BASE_EXTERNAL=
# [Optional] Direct API base URL (alternative to above)
NEXT_PUBLIC_API_BASE=
# ==============================================================================
# Debug & Development
# ==============================================================================
# [Optional] Disable SSL verification (not recommended for production)
DISABLE_SSL_VERIFY=false
# ==============================
# HuggingFace / MinerU (Optional)
# ==============================
# Use a HuggingFace mirror endpoint (optional)
# HF_ENDPOINT=https://your-hf-mirror.example.com
# HuggingFace cache directory (recommended: mount this in Docker to reuse cache)
# HF_HOME=/app/data/hf
# Force offline mode (requires models already downloaded into the cache)
# HF_HUB_OFFLINE=1