Skip to content

Commit 914ca67

Browse files
add config files
1 parent eb97491 commit 914ca67

2 files changed

Lines changed: 174 additions & 0 deletions

File tree

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
name: Lightspeed Core Service (LCS)
2+
service:
3+
host: 0.0.0.0
4+
port: 8080
5+
auth_enabled: false
6+
workers: 1
7+
color_log: true
8+
access_log: true
9+
llama_stack:
10+
# Uses a remote llama-stack service
11+
# The instance would have already been started with a llama-stack-run.yaml file
12+
# use_as_library_client: false
13+
14+
# Alternative for "as library use"
15+
use_as_library_client: true
16+
library_client_config_path: /app-root/run.yaml
17+
url: http://localhost:8321
18+
api_key: xyzzy
19+
user_data_collection:
20+
feedback_enabled: true
21+
feedback_storage: "/tmp/data/feedback"
22+
transcripts_enabled: true
23+
transcripts_storage: "/tmp/data/transcripts"
24+
25+
authentication:
26+
module: "noop"
Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
version: 2
2+
3+
apis:
4+
- agents
5+
- batches
6+
- datasetio
7+
- eval
8+
- files
9+
- inference
10+
- safety
11+
- scoring
12+
- tool_runtime
13+
- vector_io
14+
15+
benchmarks: []
16+
datasets: []
17+
image_name: starter
18+
# external_providers_dir: /opt/app-root/src/.llama/providers.d
19+
20+
providers:
21+
inference:
22+
- provider_id: openai # This ID is defined here and referenced elsewhere as 'providers.inference'
23+
provider_type: remote::openai
24+
config:
25+
api_key: ${env.OPENAI_API_KEY}
26+
allowed_models: ["${env.E2E_OPENAI_MODEL:=gpt-4o-mini}"]
27+
- config: {}
28+
provider_id: sentence-transformers
29+
provider_type: inline::sentence-transformers
30+
files:
31+
- config:
32+
metadata_store:
33+
table_name: files_metadata
34+
backend: sql_default
35+
storage_dir: ~/.llama/storage/files
36+
provider_id: meta-reference-files
37+
provider_type: inline::localfs
38+
safety:
39+
- config:
40+
excluded_categories: []
41+
provider_id: llama-guard
42+
provider_type: inline::llama-guard
43+
scoring:
44+
- provider_id: basic
45+
provider_type: inline::basic
46+
config: {}
47+
- provider_id: llm-as-judge
48+
provider_type: inline::llm-as-judge
49+
config: {}
50+
- provider_id: braintrust
51+
provider_type: inline::braintrust
52+
config:
53+
openai_api_key: '********'
54+
tool_runtime:
55+
- config: {} # Enable the RAG tool
56+
provider_id: rag-runtime
57+
provider_type: inline::rag-runtime
58+
vector_io:
59+
- config: # Define the storage backend for RAG
60+
persistence:
61+
namespace: vector_io::faiss
62+
backend: kv_default
63+
provider_id: faiss
64+
provider_type: inline::faiss
65+
agents:
66+
- config:
67+
persistence:
68+
agent_state:
69+
namespace: agents_state
70+
backend: kv_default
71+
responses:
72+
table_name: agents_responses
73+
backend: sql_default
74+
provider_id: meta-reference
75+
provider_type: inline::meta-reference
76+
batches:
77+
- config:
78+
kvstore:
79+
namespace: batches_store
80+
backend: kv_default
81+
provider_id: reference
82+
provider_type: inline::reference
83+
datasetio:
84+
- config:
85+
kvstore:
86+
namespace: huggingface_datasetio
87+
backend: kv_default
88+
provider_id: huggingface
89+
provider_type: remote::huggingface
90+
- config:
91+
kvstore:
92+
namespace: localfs_datasetio
93+
backend: kv_default
94+
provider_id: localfs
95+
provider_type: inline::localfs
96+
eval:
97+
- config:
98+
kvstore:
99+
namespace: eval_store
100+
backend: kv_default
101+
provider_id: meta-reference
102+
provider_type: inline::meta-reference
103+
scoring_fns: []
104+
server:
105+
port: 8321
106+
storage:
107+
backends:
108+
kv_default: # Define the storage backend type for RAG; in this case the registry and RAG are unified, i.e. information on registered resources (e.g. models, vector_stores) is saved together with the RAG chunks
109+
type: kv_sqlite
110+
db_path: ${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db}
111+
sql_default:
112+
type: sql_sqlite
113+
db_path: ${env.SQL_STORE_PATH:=~/.llama/storage/sql_store.db}
114+
stores:
115+
metadata:
116+
namespace: registry
117+
backend: kv_default
118+
inference:
119+
table_name: inference_store
120+
backend: sql_default
121+
max_write_queue_size: 10000
122+
num_writers: 4
123+
conversations:
124+
table_name: openai_conversations
125+
backend: sql_default
126+
prompts:
127+
namespace: prompts
128+
backend: kv_default
129+
registered_resources:
130+
models: []
131+
shields:
132+
- shield_id: llama-guard
133+
provider_id: llama-guard
134+
provider_shield_id: openai/gpt-4o-mini
135+
vector_stores: []
136+
datasets: []
137+
scoring_fns: []
138+
benchmarks: []
139+
tool_groups:
140+
- toolgroup_id: builtin::rag # Register the RAG tool
141+
provider_id: rag-runtime
142+
vector_stores:
143+
default_provider_id: faiss
144+
default_embedding_model: # Define the default embedding model for RAG
145+
provider_id: sentence-transformers
146+
model_id: nomic-ai/nomic-embed-text-v1.5
147+
safety:
148+
default_shield_id: llama-guard

0 commit comments

Comments
 (0)