-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path.env.example
37 lines (29 loc) · 1.01 KB
/
.env.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#
# PUBLIC ENVIRONMENT VARIABLES, EXPOSED TO THE BROWSER
#
# NOTE(review): values wrapped in $NAME$ look like template placeholders to be
# substituted before use (e.g. by a setup/provisioning script) — confirm, and
# do not ship them literally.
# Pangea service domain for the deployed environment.
NEXT_PUBLIC_PANGEA_DOMAIN="$NEXT_PUBLIC_PANGEA_DOMAIN$"
# Client-side AuthN token — NEXT_PUBLIC_* values are inlined into the browser
# bundle, so this must be a token that is safe to expose publicly.
NEXT_PUBLIC_AUTHN_CLIENT_TOKEN="$NEXT_PUBLIC_AUTHN_CLIENT_TOKEN$"
# URL of the hosted login page users are redirected to for sign-in.
NEXT_PUBLIC_AUTHN_HOSTED_LOGIN_URL="$NEXT_PUBLIC_AUTHN_HOSTED_LOGIN_URL$"
#
# PRIVATE ENVIRONMENT VARIABLES, NOT EXPOSED TO THE BROWSER
# ONLY AVAILABLE ON THE SERVER
#
# Server-side Pangea service token — secret; never prefix with NEXT_PUBLIC_.
PANGEA_SERVICE_TOKEN="$PANGEA_SERVICE_TOKEN$"
# OpenAI API key — secret.
OPENAI_API_KEY="$OPENAI_API_KEY$"
# Feature toggles; values are the strings "true"/"false" — presumably parsed
# server-side, verify the exact strings the consumer expects.
OPTIONS_REDACT_USER_PROMPTS="true"
OPTIONS_AUDIT_USER_PROMPTS="true"
OPTIONS_THREAT_ANALYSE_SERVICE_RESPONSES="true"
# Replace with your Hugging Face access token (presumably used to pull gated
# models such as meta-llama — confirm against the model loader).
HF_TOKEN="YOUR HUGGING FACE TOKEN HERE"
# OpenAI-compatible base URL of the model API server.
# Keep this the same if running locally — "fastchat-api-server" looks like a
# Docker Compose service hostname; change it for non-container deployments.
MODEL_BASE_PATH="http://fastchat-api-server:8000/v1"
# LLM Runtime Settings =============================
# Model Path — Hugging Face repo id of the model to load.
MODEL_PATH="meta-llama/Llama-2-7b-chat-hf"
# HuggingFace Model Name (the name the API server advertises/serves under).
MODEL_NAME="Llama-2-7b-chat-hf"
# Number of CPU threads to use for CPU inference
CPU_THREADS=5
# OpenVINO Settings
# Path to the converted OpenVINO IR (Intermediate Representation) model files.
IR_PATH=/home/openvino/models/llama-2/ir_model
# OpenVINO target device string; "GPU.1" selects the second GPU — adjust to
# "CPU", "GPU.0", etc. to match the host hardware.
DEVICE="GPU.1"