# .env.mac.example
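#
# Quick-start sketch (assumptions: this example is copied to a real env
# file named ".env.mac" and the app is started with "npm run dev", as the
# NEXT_PUBLIC_* variables suggest a Next.js project; adjust to your setup):
#   cp .env.mac.example .env.mac
#   # edit .env.mac to fill in the $...$ placeholders
#   source .env.mac
#   npm run dev
#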
#
# PUBLIC ENVIRONMENT VARIABLES, EXPOSED TO THE BROWSER
#
export NEXT_PUBLIC_PANGEA_DOMAIN="$NEXT_PUBLIC_PANGEA_DOMAIN$"
export NEXT_PUBLIC_AUTHN_CLIENT_TOKEN="$NEXT_PUBLIC_AUTHN_CLIENT_TOKEN$"
export NEXT_PUBLIC_AUTHN_HOSTED_LOGIN_URL="$NEXT_PUBLIC_AUTHN_HOSTED_LOGIN_URL$"
#
# PRIVATE ENVIRONMENT VARIABLES, NOT EXPOSED TO THE BROWSER
# ONLY AVAILABLE ON THE SERVER
#
export PANGEA_SERVICE_TOKEN="$PANGEA_SERVICE_TOKEN$"
export OPENAI_API_KEY="$OPENAI_API_KEY$"
export OPTIONS_REDACT_USER_PROMPTS="true"
export OPTIONS_AUDIT_USER_PROMPTS="true"
export OPTIONS_THREAT_ANALYSE_SERVICE_RESPONSES="true"
# Replace with your Hugging Face access token
export HF_TOKEN="YOUR HF TOKEN HERE"
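# A quick way to confirm the token is valid (assumption: recent
# huggingface_hub releases read HF_TOKEN from the environment):
#   huggingface-cli whoami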
# Keep this value unchanged if the model server is running locally
export MODEL_BASE_PATH="http://${HOSTNAME}:8000/v1"
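# Once the model server is running, this endpoint should respond
# (assumption: an OpenAI-compatible API is listening on port 8000,
# as MODEL_BASE_PATH implies):
#   curl "${MODEL_BASE_PATH}/models"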
# LLM Runtime Settings =============================
# Alternative model (uncomment to use Llama 2 instead of Vicuna below):
# export MODEL_PATH="meta-llama/Llama-2-7b-chat-hf"
# export MODEL_NAME="Llama-2-7b-chat-hf"
# Model Path
export MODEL_PATH="lmsys/vicuna-7b-v1.3"
# HuggingFace Model Name
export MODEL_NAME="vicuna-7b-v1.3"
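# Optional: pre-fetch the weights so the first run doesn't stall on the
# download (assumption: huggingface_hub >= 0.17, which ships the
# "huggingface-cli download" subcommand):
#   huggingface-cli download "${MODEL_PATH}"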
# Number of CPU threads to use for CPU inference
export CPU_THREADS=5
# OpenVINO Settings
export IR_PATH=/home/openvino/models/llama-2/ir_model
export DEVICE="GPU.1"
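# To list the device strings valid on this machine (assumption: the
# openvino Python package >= 2023.1 is installed; older releases expose
# Core under openvino.runtime):
#   python -c 'from openvino import Core; print(Core().available_devices)'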
# PyTorch / BLAS threading config
export OMP_NUM_THREADS=8
export MKL_NUM_THREADS=8
export OPENBLAS_NUM_THREADS=8
export VECLIB_MAXIMUM_THREADS=8
export NUMEXPR_NUM_THREADS=8
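# Verify the caps took effect (assumption: PyTorch is installed; it reads
# OMP_NUM_THREADS at import time):
#   python -c 'import torch; print(torch.get_num_threads())'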