env.example
# Make a copy of this file with the name .env and assign values to the variables
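## For example, from the project root:
##   cp env.example .env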
# Location of the application routes
FLASK_APP=api/app.py
# Ensure print statements appear as they happen
PYTHONUNBUFFERED=1
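## With FLASK_APP set, the development server can typically be started with
## something like the following (assuming Flask and the project's requirements
## are installed):
##   flask run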
# How you connect to Elasticsearch: change these details to match your instance
ELASTICSEARCH_URL=http://localhost:9200
ELASTICSEARCH_USER=elastic
ELASTICSEARCH_PASSWORD=elastic
# ELASTICSEARCH_API_KEY=
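## Hypothetical example of connecting to a hosted deployment with an API key
## instead of basic auth (the URL and key below are placeholders, not real values):
# ELASTICSEARCH_URL=https://my-deployment.es.example.com:443
# ELASTICSEARCH_API_KEY=YOUR_BASE64_ENCODED_API_KEY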
# The names of the Elasticsearch indices
ES_INDEX=workplace-app-docs
ES_INDEX_CHAT_HISTORY=workplace-app-docs-chat-history
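## The LLM provider blocks below are alternatives: uncomment and complete only
## the one you want to use.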
# Uncomment and complete if you want to use OpenAI
# LLM_TYPE=openai
# OPENAI_API_KEY=
# CHAT_MODEL=gpt-4o-mini
# Uncomment and complete if you want to use Azure OpenAI
# LLM_TYPE=azure
## "Azure OpenAI Endpoint" in https://oai.azure.com/resource/overview
# AZURE_OPENAI_ENDPOINT=https://YOUR_RESOURCE_NAME.openai.azure.com/
## "API key 1 (or 2)" in https://oai.azure.com/resource/overview
# AZURE_OPENAI_API_KEY=
## "Inference version" from https://learn.microsoft.com/en-us/azure/ai-services/openai/api-version-deprecation
# OPENAI_API_VERSION=2024-10-01-preview
## "Name" from https://oai.azure.com/resource/deployments
# CHAT_DEPLOYMENT=YOUR_DEPLOYMENT_NAME
# Uncomment and complete if you want to use Bedrock LLM
# LLM_TYPE=bedrock
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# AWS_DEFAULT_REGION=
# CHAT_MODEL=amazon.titan-text-lite-v1
# Uncomment and complete if you want to use Vertex AI
# LLM_TYPE=vertex
## Project that has the service "aiplatform.googleapis.com" enabled
# GOOGLE_CLOUD_PROJECT=
# GOOGLE_CLOUD_REGION=
# CHAT_MODEL=gemini-1.5-flash-002
## Needed if you haven't run `gcloud auth application-default login`
# GOOGLE_APPLICATION_CREDENTIALS=
# Uncomment and complete if you want to use Mistral AI
# LLM_TYPE=mistral
## Key in https://console.mistral.ai/api-keys/
# MISTRAL_API_KEY=
## 'API Endpoints' from https://docs.mistral.ai/getting-started/models/models_overview/
# CHAT_MODEL=open-mistral-nemo
## Only set this if not using the default Mistral base URL
# MISTRAL_BASE_URL=
# Uncomment and complete if you want to use Cohere
# LLM_TYPE=cohere
## Key in https://dashboard.cohere.com/api-keys
# COHERE_API_KEY=
# CHAT_MODEL=command-r7b-12-2024
# Set to false if you want to record logs, traces, and metrics.
OTEL_SDK_DISABLED=true
# Assign the service name that shows up in Kibana
OTEL_SERVICE_NAME=chatbot-rag-app
# By default, send traces to a local Elastic APM server
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8200
OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
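## If your OTLP endpoint requires authentication, you may also need to set
## request headers, for example (hypothetical token value):
# OTEL_EXPORTER_OTLP_HEADERS=Authorization=Bearer%20YOUR_SECRET_TOKEN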
# Change to 'false' to hide prompt and completion content
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
# Export metrics every 3 seconds instead of every minute
OTEL_METRIC_EXPORT_INTERVAL=3000
# Export traces every 3 seconds instead of every 5 seconds
OTEL_BSP_SCHEDULE_DELAY=3000
# Change to control which resources are detected. Note: these choices are
# specific to the language, in this case Python.
OTEL_EXPERIMENTAL_RESOURCE_DETECTORS=process_runtime,os,otel,telemetry_distro