# Makefile for Ansible Chatbot Stack
# Default values for environment variables
QUAY_ORG ?=
ANSIBLE_CHATBOT_VERSION ?= latest
ANSIBLE_CHATBOT_VLLM_URL ?=
ANSIBLE_CHATBOT_VLLM_API_TOKEN ?=
ANSIBLE_CHATBOT_INFERENCE_MODEL ?=
ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER ?=
LLAMA_STACK_PORT ?= 8321
LOCAL_DB_PATH ?= ./local_db
CONTAINER_DB_PATH ?= /.llama/data/distributions/ansible-chatbot
RAG_CONTENT_IMAGE ?= quay.io/ansible/aap-rag-content:latest
LIGHTSPEED_STACK_CONFIG ?= lightspeed-stack.yaml
LLAMA_STACK_RUN_CONFIG ?= ansible-chatbot-run.yaml
SYSTEM_PROMPT ?= ansible-chatbot-system-prompt.txt
PROVIDER_VECTOR_DB_ID_FILE ?= "./vector_db/provider_vector_db_id.ind"
PROVIDER_VECTOR_DB_ID ?= $(shell [ -f $(PROVIDER_VECTOR_DB_ID_FILE) ] && cat $(PROVIDER_VECTOR_DB_ID_FILE))
CHATBOT_API_TOKEN ?=
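
# Example invocation overriding the defaults above (values are illustrative;
# substitute your own endpoint, token, and model):
#   make run ANSIBLE_CHATBOT_VLLM_URL=https://vllm.example.com/v1 \
#            ANSIBLE_CHATBOT_VLLM_API_TOKEN=my-token \
#            ANSIBLE_CHATBOT_INFERENCE_MODEL=my-model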
# Colors for terminal output
RED := \033[0;31m
NC := \033[0m # No Color
# Choose between docker and podman based on what is available
ifeq (, $(shell which podman))
  CONTAINER_RUNTIME ?= docker
  IMAGE_PREFIX ?=
else
  CONTAINER_RUNTIME ?= podman
  IMAGE_PREFIX ?= localhost/
endif
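
# The auto-detection above can be overridden explicitly, e.g. to force docker
# on a host that also has podman installed:
#   make build CONTAINER_RUNTIME=docker IMAGE_PREFIX=
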
.PHONY: help setup setup-test setup-vector-db build run run-test run-local-db clean all deploy-k8s shell tag-and-push test load-test update-lock check-env-build check-env-run check-env-run-local-db check-env-tag-and-push
.EXPORT_ALL_VARIABLES:
UV_HTTP_TIMEOUT=120
PLATFORM ?= "linux/amd64"

help:
	@echo "Makefile for Ansible Chatbot Stack"
	@echo "Available targets:"
	@echo "  help            - Show this help message"
	@echo "  all             - Run all steps (setup, build)"
	@echo "  setup           - Set up llama-stack and the external lightspeed providers"
	@echo "  setup-test      - Set up a test environment with dummy data (no Quay credentials needed)"
	@echo "  setup-vector-db - Set up the vector DB and embedding model"
	@echo "  build           - Build the customized Ansible Chatbot Stack image from lightspeed-core/lightspeed-stack"
	@echo "  run             - Run the Ansible Chatbot Stack container built with 'build'"
	@echo "  run-test        - Run some sanity checks against the running Ansible Chatbot Stack container"
	@echo "  run-local-db    - Run the Ansible Chatbot Stack container with the local DB mapped to the container DB"
	@echo "  clean           - Clean up generated files and container images"
	@echo "  deploy-k8s      - Deploy to a Kubernetes cluster"
	@echo "  shell           - Get a shell in the container"
	@echo "  tag-and-push    - Tag and push the container image to quay.io"
	@echo "  load-test       - Run a Locust load test against a locally running stack"
	@echo "  update-lock     - Update the uv.lock file"
	@echo ""
	@echo "Test targets:"
	@echo "  test            - Run all tests (uses a mock OpenAI server)"
	@echo ""
	@echo "Required environment variables:"
	@echo "  ANSIBLE_CHATBOT_VERSION                - Version tag for the image (default: $(ANSIBLE_CHATBOT_VERSION))"
	@echo "  ANSIBLE_CHATBOT_VLLM_URL               - URL for the vLLM inference provider"
	@echo "  ANSIBLE_CHATBOT_VLLM_API_TOKEN         - API token for the vLLM inference provider"
	@echo "  ANSIBLE_CHATBOT_INFERENCE_MODEL        - Inference model to use"
	@echo "  ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER - Inference model to use for tools filtering"
	@echo "  CONTAINER_DB_PATH                      - Path to the container database (default: $(CONTAINER_DB_PATH))"
	@echo "  LOCAL_DB_PATH                          - Path to the local database (default: $(LOCAL_DB_PATH))"
	@echo "  LLAMA_STACK_PORT                       - Port to expose (default: $(LLAMA_STACK_PORT))"
	@echo "  QUAY_ORG                               - Quay organization name (default: $(QUAY_ORG))"

setup: setup-vector-db llama-stack/providers.d/inline/agents/lightspeed_inline_agent.yaml

llama-stack/providers.d/inline/agents/lightspeed_inline_agent.yaml:
	@echo "Setting up environment..."
	uv sync
	scripts/download_agent_yaml.sh
	@echo "Environment setup complete."

setup-test: llama-stack/providers.d/inline/agents/lightspeed_inline_agent.yaml
	@echo "Setting up test environment (dummy data)..."
	uv sync --group test
	uv run python tests/setup_test_data.py
	@echo "Test environment setup complete."

setup-vector-db: vector_db/aap_faiss_store.db

vector_db/aap_faiss_store.db:
	@echo "Setting up vector db and embedding image..."
	rm -rf ./vector_db ./embeddings_model
	mkdir -p ./vector_db
	$(CONTAINER_RUNTIME) run --platform $(PLATFORM) -d --rm --name rag-content $(RAG_CONTENT_IMAGE) sleep infinity
	$(CONTAINER_RUNTIME) cp rag-content:/rag/llama_stack_vector_db/faiss_store.db.gz ./vector_db/aap_faiss_store.db.gz
	$(CONTAINER_RUNTIME) cp rag-content:/rag/llama_stack_vector_db/provider_vector_db_id.ind ./vector_db/provider_vector_db_id.ind
	$(CONTAINER_RUNTIME) cp rag-content:/rag/embeddings_model .
	$(CONTAINER_RUNTIME) kill rag-content
	gzip -d ./vector_db/aap_faiss_store.db.gz
	# These permission changes allow container user 1001 to read/write
	# the files in these directories.
	chmod -R og+rw ./vector_db/
	chmod -R og+rw ./embeddings_model/
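
# Optional sanity check after setup (paths as created above):
#   ls -lh ./vector_db/aap_faiss_store.db ./vector_db/provider_vector_db_id.ind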

# Pre-check required environment variables for build
check-env-build:
	@if [ -z "$(ANSIBLE_CHATBOT_VERSION)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_VERSION is required but not set$(NC)\n"; \
		exit 1; \
	fi

build: check-env-build setup
	@echo "Building customized Ansible Chatbot Stack image from lightspeed-core/lightspeed-stack..."
	$(CONTAINER_RUNTIME) build --no-cache --platform $(PLATFORM) -f ./Containerfile \
		--build-arg ANSIBLE_CHATBOT_VERSION=$(ANSIBLE_CHATBOT_VERSION) \
		--build-arg LLAMA_STACK_RUN_CONFIG=$(LLAMA_STACK_RUN_CONFIG) \
		-t ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION) .
	@printf "Custom image $(RED)ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)$(NC) built successfully.\n"

# Pre-check for required environment variables
check-env-run:
	@if [ -z "$(ANSIBLE_CHATBOT_VLLM_URL)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_VLLM_URL is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(ANSIBLE_CHATBOT_VLLM_API_TOKEN)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_VLLM_API_TOKEN is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(ANSIBLE_CHATBOT_INFERENCE_MODEL)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_INFERENCE_MODEL is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(ANSIBLE_CHATBOT_VERSION)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_VERSION is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(PROVIDER_VECTOR_DB_ID)" ]; then \
		printf "$(RED)Error: PROVIDER_VECTOR_DB_ID is required but not set$(NC)\n"; \
		printf "Run 'make setup' to generate $(PROVIDER_VECTOR_DB_ID_FILE) or set PROVIDER_VECTOR_DB_ID manually.\n"; \
		exit 1; \
	fi
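
# PROVIDER_VECTOR_DB_ID is normally read from $(PROVIDER_VECTOR_DB_ID_FILE),
# which 'make setup' extracts from the RAG content image; it can also be
# passed on the command line (the ID below is hypothetical):
#   make run PROVIDER_VECTOR_DB_ID=aap-product-docs-2_6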

run: check-env-run
	@echo "Running Ansible Chatbot Stack container..."
	@echo "Using vLLM URL: $(ANSIBLE_CHATBOT_VLLM_URL)"
	@echo "Using inference model: $(ANSIBLE_CHATBOT_INFERENCE_MODEL)"
	$(CONTAINER_RUNTIME) run --platform $(PLATFORM) --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
		-v ./embeddings_model:/.llama/data/embeddings_model \
		-v ./vector_db/aap_faiss_store.db:$(CONTAINER_DB_PATH)/aap_faiss_store.db \
		-v ./$(LIGHTSPEED_STACK_CONFIG):/.llama/distributions/ansible-chatbot/config/lightspeed-stack.yaml \
		-v ./$(LLAMA_STACK_RUN_CONFIG):/.llama/distributions/llama-stack/config/ansible-chatbot-run.yaml \
		-v ./$(SYSTEM_PROMPT):/.llama/distributions/ansible-chatbot/system-prompts/default.txt \
		--env VLLM_URL=$(ANSIBLE_CHATBOT_VLLM_URL) \
		--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
		--env INFERENCE_MODEL=$(ANSIBLE_CHATBOT_INFERENCE_MODEL) \
		--env INFERENCE_MODEL_FILTER=$(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER) \
		--env GEMINI_API_KEY=$(GEMINI_API_KEY) \
		--env OPENAI_INFERENCE_MODEL=$(OPENAI_INFERENCE_MODEL) \
		--env OPENAI_API_KEY=$(OPENAI_API_KEY) \
		--env OPENAI_BASE_URL=$(OPENAI_BASE_URL) \
		--env PROVIDER_VECTOR_DB_ID=$(PROVIDER_VECTOR_DB_ID) \
		--env CHATBOT_API_TOKEN=$(CHATBOT_API_TOKEN) \
		--env OTEL_SDK_DISABLED=true \
		$(IMAGE_PREFIX)ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)

run-test:
	@echo "Running test query against lightspeed-core/lightspeed-stack's /config endpoint..."
	curl -X GET http://localhost:$(LLAMA_STACK_PORT)/v1/config | jq .
	@echo "Running test query against lightspeed-core/lightspeed-stack's /models endpoint..."
	curl -X GET http://localhost:$(LLAMA_STACK_PORT)/v1/models | jq .
	@echo "Running test query against lightspeed-core/lightspeed-stack's /query endpoint..."
	curl -X POST http://localhost:$(LLAMA_STACK_PORT)/v1/query -H "Content-Type: application/json" --data '{"query": "What is Ansible EDA?"}' | jq .
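
# The checks above assume the container from 'make run' is up and that jq is
# installed; without jq, a bare curl against the default port works too:
#   curl -s http://localhost:8321/v1/config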

# Pre-check required environment variables for local DB run
check-env-run-local-db: check-env-run
	@if [ -z "$(LOCAL_DB_PATH)" ]; then \
		printf "$(RED)Error: LOCAL_DB_PATH is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(CONTAINER_DB_PATH)" ]; then \
		printf "$(RED)Error: CONTAINER_DB_PATH is required but not set$(NC)\n"; \
		exit 1; \
	fi

local_db:
	@if [ ! -d "$(LOCAL_DB_PATH)" ]; then \
		echo "Creating LOCAL_DB_PATH directory..."; \
		mkdir "$(LOCAL_DB_PATH)"; \
	fi

local_db_store:
	@if [ ! -e "$(LOCAL_DB_PATH)/aap_faiss_store.db" ]; then \
		cp ./vector_db/aap_faiss_store.db "$(LOCAL_DB_PATH)/"; \
	fi

run-local-db: check-env-run-local-db local_db local_db_store
	@echo "Running Ansible Chatbot Stack container..."
	@echo "Using vLLM URL: $(ANSIBLE_CHATBOT_VLLM_URL)"
	@echo "Using inference model: $(ANSIBLE_CHATBOT_INFERENCE_MODEL)"
	@echo "Using inference model for tools filtering: $(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER)"
	@echo "Mapping local DB from $(LOCAL_DB_PATH) to $(CONTAINER_DB_PATH)"
	$(CONTAINER_RUNTIME) run --platform $(PLATFORM) --security-opt label=disable -it -p $(LLAMA_STACK_PORT):8080 \
		-v $(LOCAL_DB_PATH):$(CONTAINER_DB_PATH) \
		-v ./embeddings_model:/.llama/data/embeddings_model \
		-v $(LOCAL_DB_PATH)/aap_faiss_store.db:$(CONTAINER_DB_PATH)/aap_faiss_store.db \
		-v ./$(LIGHTSPEED_STACK_CONFIG):/.llama/distributions/ansible-chatbot/config/lightspeed-stack.yaml \
		-v ./$(LLAMA_STACK_RUN_CONFIG):/.llama/distributions/llama-stack/config/ansible-chatbot-run.yaml \
		-v ./$(SYSTEM_PROMPT):/.llama/distributions/ansible-chatbot/system-prompts/default.txt \
		--env VLLM_URL=$(ANSIBLE_CHATBOT_VLLM_URL) \
		--env VLLM_API_TOKEN=$(ANSIBLE_CHATBOT_VLLM_API_TOKEN) \
		--env INFERENCE_MODEL=$(ANSIBLE_CHATBOT_INFERENCE_MODEL) \
		--env INFERENCE_MODEL_FILTER=$(ANSIBLE_CHATBOT_INFERENCE_MODEL_FILTER) \
		--env GEMINI_API_KEY=$(GEMINI_API_KEY) \
		--env OPENAI_INFERENCE_MODEL=$(OPENAI_INFERENCE_MODEL) \
		--env OPENAI_API_KEY=$(OPENAI_API_KEY) \
		--env OPENAI_BASE_URL=$(OPENAI_BASE_URL) \
		--env PROVIDER_VECTOR_DB_ID=$(PROVIDER_VECTOR_DB_ID) \
		--env CHATBOT_API_TOKEN=$(CHATBOT_API_TOKEN) \
		--env OTEL_SDK_DISABLED=true \
		$(IMAGE_PREFIX)ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)
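
# Because $(LOCAL_DB_PATH) is bind-mounted over $(CONTAINER_DB_PATH), anything
# the stack writes under that path survives container restarts.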

clean:
	@echo "Cleaning up..."
	@echo "Cleaning up your local folders..."
	rm -rf llama-stack/
	rm -rf embeddings_model/
	rm -rf vector_db/
	rm -rf providers.d/
	rm -rf work/
	rm -rf local_db/
	rm -f requirements.txt
	@echo "Removing ansible-chatbot-stack images..."
	$(CONTAINER_RUNTIME) rmi -f $$($(CONTAINER_RUNTIME) images -a -q --filter reference=ansible-chatbot-stack) || true
	@echo "Removing ansible-chatbot-stack containers..."
	$(CONTAINER_RUNTIME) rm -f $$($(CONTAINER_RUNTIME) ps -a -q --filter ancestor=ansible-chatbot-stack) || true
	@echo "Clean-up complete."

deploy-k8s:
	@echo "Change configuration in kustomization.yaml accordingly, then deploy:"
	kubectl kustomize . > local-chatbot-stack-deploy.yaml
	@echo "Deploy the service:"
	kubectl apply -f local-chatbot-stack-deploy.yaml
	@echo "Deployment initiated. Verify using kubectl commands."

shell:
	@echo "Getting a shell in the container..."
	$(CONTAINER_RUNTIME) run --security-opt label=disable -it --entrypoint /bin/bash $(IMAGE_PREFIX)ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)

# Pre-check required environment variables for tag-and-push
check-env-tag-and-push:
	@if [ -z "$(QUAY_ORG)" ]; then \
		printf "$(RED)Error: QUAY_ORG is required but not set$(NC)\n"; \
		exit 1; \
	fi
	@if [ -z "$(ANSIBLE_CHATBOT_VERSION)" ]; then \
		printf "$(RED)Error: ANSIBLE_CHATBOT_VERSION is required but not set$(NC)\n"; \
		exit 1; \
	fi

tag-and-push: check-env-tag-and-push
	@echo "Logging in to quay.io..."
	@echo "Please enter your quay.io credentials when prompted"
	$(CONTAINER_RUNTIME) login quay.io
	@echo "Tagging image ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)"
	$(CONTAINER_RUNTIME) tag ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION) quay.io/$(QUAY_ORG)/ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)
	@echo "Pushing image to quay.io..."
	$(CONTAINER_RUNTIME) push quay.io/$(QUAY_ORG)/ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)
	@echo "Image successfully pushed to quay.io/$(QUAY_ORG)/ansible-chatbot-stack:$(ANSIBLE_CHATBOT_VERSION)"

all: setup build
	@echo "All build steps completed successfully."
	@printf "To run the container, use: $(RED)make run$(NC)\n"
	@printf "To tag and push the container to quay.io, use: $(RED)make tag-and-push$(NC)\n"
load-test:
	uv run locust -f scripts/loading_test.py -t 120 --users 10 --spawn-rate 10 -H http://localhost:8321

update-lock:
	@echo "Updating uv.lock..."
	uv lock
	@echo "uv.lock updated successfully."

test:
	@echo "Running all tests (mock OpenAI server)..."
	uv run --group test pytest tests/ -v