From 5145b7246cf53e9feef8df3076db9226f593ae9e Mon Sep 17 00:00:00 2001 From: Adrian Cole Date: Mon, 17 Feb 2025 14:41:28 +0800 Subject: [PATCH 1/3] chatbot-rag-app: adds Kubernetes manifest and instructions Signed-off-by: Adrian Cole --- docker/README.md | 6 +- example-apps/chatbot-rag-app/Dockerfile | 4 +- example-apps/chatbot-rag-app/README.md | 62 +++++- example-apps/chatbot-rag-app/env.example | 6 +- example-apps/chatbot-rag-app/k8s-manifest.yml | 57 ++++++ k8s/README.md | 47 +++++ k8s/k8s-manifest-elastic.yml | 191 ++++++++++++++++++ 7 files changed, 365 insertions(+), 8 deletions(-) create mode 100644 example-apps/chatbot-rag-app/k8s-manifest.yml create mode 100644 k8s/README.md create mode 100644 k8s/k8s-manifest-elastic.yml diff --git a/docker/README.md b/docker/README.md index 75062ab5..41824c62 100644 --- a/docker/README.md +++ b/docker/README.md @@ -9,8 +9,11 @@ Note: If you haven't checked out this repository, all you need is one file: wget https://raw.githubusercontent.com/elastic/elasticsearch-labs/refs/heads/main/docker/docker-compose-elastic.yml ``` -Use docker compose to run Elastic stack in the background: +Before you begin, ensure you have free CPU and memory on your Docker host. If +you plan to use ELSER, assume a minimum of 8 cpus and 6GB memory for the +containers in this compose file. +First, start this Elastic Stack in the background: ```bash docker compose -f docker-compose-elastic.yml up --force-recreate --wait -d ``` @@ -20,7 +23,6 @@ Then, you can view Kibana at http://localhost:5601/app/home#/ If asked for a username and password, use username: elastic and password: elastic. 
Clean up when finished, like this: - ```bash docker compose -f docker-compose-elastic.yml down ``` diff --git a/example-apps/chatbot-rag-app/Dockerfile b/example-apps/chatbot-rag-app/Dockerfile index 21014434..4f1a669b 100644 --- a/example-apps/chatbot-rag-app/Dockerfile +++ b/example-apps/chatbot-rag-app/Dockerfile @@ -5,9 +5,7 @@ COPY frontend ./frontend RUN cd frontend && yarn install RUN cd frontend && REACT_APP_API_HOST=/api yarn build -# langchain and vertexai depend on a large number of system packages including -# linux-headers, g++, geos, geos-dev, rust and cargo. These are already present -# on -slim and adding them to -alpine results in a larger image than -slim. +# Use glibc-based image to get pre-compiled wheels for grpcio and tiktoken FROM python:3.12-slim WORKDIR /app diff --git a/example-apps/chatbot-rag-app/README.md b/example-apps/chatbot-rag-app/README.md index 869a94c7..a67c2cc9 100644 --- a/example-apps/chatbot-rag-app/README.md +++ b/example-apps/chatbot-rag-app/README.md @@ -22,8 +22,8 @@ Copy [env.example](env.example) to `.env` and fill in values noted inside. ## Installing and connecting to Elasticsearch There are a number of ways to install Elasticsearch. Cloud is best for most -use-cases. We also have [docker-compose-elastic.yml](../../docker), that starts -Elasticsearch, Kibana, and APM Server on your laptop with one command. +use-cases. We also have [docker-compose-elastic.yml][docker-compose-elastic], +that starts Elasticsearch, Kibana, and APM Server on your laptop in one step. Once you decided your approach, edit your `.env` file accordingly. @@ -71,6 +71,62 @@ Clean up when finished, like this: docker compose down ``` +### Run with Kubernetes + +Kubernetes is more complicated than Docker, but closer to the production +experience for many users. [k8s-manifest.yml](k8s-manifest.yml) creates the +same services, but needs additional configuration first. + +First step is to setup your environment. 
[env.example](env.example) must be
+copied to a file named `.env` and updated with `ELASTICSEARCH_URL` and
+`OTEL_EXPORTER_OTLP_ENDPOINT` values visible to your Kubernetes deployment.
+
+For example, if you started your Elastic Stack with [k8s-manifest-elastic.yml][k8s-manifest-elastic],
+you would update these values:
+```
+ELASTICSEARCH_URL=http://elasticsearch:9200
+OTEL_EXPORTER_OTLP_ENDPOINT=http://apm-server:8200
+```
+
+Then, import your `.env` file as a configmap like this:
+```bash
+kubectl create configmap chatbot-rag-app-env --from-env-file=.env
+```
+
+If you are using Vertex AI, make a secret for authentication:
+```bash
+kubectl create secret generic gcloud-credentials \
+  --from-file=application_default_credentials.json=$HOME/.config/gcloud/application_default_credentials.json
+```
+
+Now that your configuration is applied, create the chatbot-rag-app deployment
+and service by applying this manifest:
+```bash
+kubectl apply -f k8s-manifest.yml
+```
+
+Next, block until chatbot-rag-app is available.
+```bash
+kubectl wait --for=condition=available --timeout=20m deployment/chatbot-rag-app
+```
+
+*Note*: The first run may take several minutes to become available.
Here's how
+to follow logs on this stage:
+```bash
+kubectl logs deployment.apps/chatbot-rag-app -c create-index -f
+```
+
+Next, forward the kibana port:
+```bash
+kubectl port-forward service/kibana 5601:5601 &
+```
+
+Clean up when finished, like this:
+
+```bash
+kubectl delete -f k8s-manifest.yml
+```
+
 ### Run with Python
 
 If you want to run this example with Python, you need to do a few things listed
@@ -196,3 +252,5 @@ docker compose up --build --force-recreate
 ---
 [loader-docs]: https://python.langchain.com/docs/how_to/#document-loaders
 [install-es]: https://www.elastic.co/search-labs/tutorials/install-elasticsearch
+[docker-compose-elastic]: ../../docker/docker-compose-elastic.yml
+[k8s-manifest-elastic]: ../../k8s/k8s-manifest-elastic.yml
diff --git a/example-apps/chatbot-rag-app/env.example b/example-apps/chatbot-rag-app/env.example
index 0cd2fb75..6e4d2274 100644
--- a/example-apps/chatbot-rag-app/env.example
+++ b/example-apps/chatbot-rag-app/env.example
@@ -6,6 +6,8 @@ FLASK_APP=api/app.py
 PYTHONUNBUFFERED=1
 
 # How you connect to Elasticsearch: change details to your instance
+# This defaults to an Elastic Stack accessible via localhost. When this is
+# running inside Kubernetes, update to http://elasticsearch:9200 or similar.
 ELASTICSEARCH_URL=http://localhost:9200
 ELASTICSEARCH_USER=elastic
 ELASTICSEARCH_PASSWORD=elastic
@@ -68,7 +70,9 @@ OTEL_SDK_DISABLED=true
 
 # Assign the service name that shows up in Kibana
 OTEL_SERVICE_NAME=chatbot-rag-app
 
-# Default to send traces to the Elastic APM server
+# Default to send logs, traces and metrics to an Elastic APM server accessible
+# via localhost. If running inside k8s, update to http://apm-server:8200
+# or similar.
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8200 OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf diff --git a/example-apps/chatbot-rag-app/k8s-manifest.yml b/example-apps/chatbot-rag-app/k8s-manifest.yml new file mode 100644 index 00000000..9bac5026 --- /dev/null +++ b/example-apps/chatbot-rag-app/k8s-manifest.yml @@ -0,0 +1,57 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chatbot-rag-app +spec: + replicas: 1 + selector: + matchLabels: + app: chatbot-rag-app + template: + metadata: + labels: + app: chatbot-rag-app + spec: + # The below will recreate your secret based on the gcloud credentials file + # kubectl create secret generic gcloud-credentials \ + # --from-file=application_default_credentials.json=$HOME/.config/gcloud/application_default_credentials.json + volumes: + - name: gcloud-credentials + secret: + secretName: gcloud-credentials + initContainers: + - name: create-index + image: &image ghcr.io/elastic/elasticsearch-labs/chatbot-rag-app:latest + args: ["flask", "create-index"] + env: + - name: FLASK_APP + value: api/app.py + # This recreates your configmap based on your .env file: + # kubectl create configmap chatbot-rag-app-env --from-env-file=.env + envFrom: &envFrom + - configMapRef: + name: chatbot-rag-app-env + volumeMounts: &volumeMounts + - name: gcloud-credentials + mountPath: /root/.config/application_default_credentials.json + readOnly: true + containers: + - name: api-frontend + image: *image + ports: + - containerPort: 4000 + envFrom: *envFrom + volumeMounts: *volumeMounts +--- +apiVersion: v1 +kind: Service +metadata: + name: api +spec: + selector: + app: chatbot-rag-app + ports: + - protocol: TCP + port: 4000 + targetPort: 4000 diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 00000000..7ca51057 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,47 @@ +# Running your own Elastic Stack with Kubernetes + +If you'd like to start Elastic with Kubernetes, you can use the provided 
+[k8s-manifest-elastic.yml](k8s-manifest-elastic.yml) file. This starts
+Elasticsearch, Kibana, and APM Server in an existing Kubernetes cluster.
+
+Note: If you haven't checked out this repository, all you need is one file:
+```bash
+wget https://raw.githubusercontent.com/elastic/elasticsearch-labs/refs/heads/main/k8s/k8s-manifest-elastic.yml
+```
+
+Before you begin, ensure you have free CPU and memory in your cluster. If you
+plan to use ELSER, assume a minimum of 8 cpus and 6GB memory for the containers
+in this manifest.
+
+First, start this Elastic Stack in the background:
+```bash
+kubectl apply -f k8s-manifest-elastic.yml
+```
+
+**Note**: For simplicity, this adds an Elastic Stack to the default namespace.
+Commands after here are simpler due to this. If you want to choose a different
+one, use `kubectl`'s `--namespace` flag!
+
+Next, block until the whole stack is available. The first install, or a change
+of Elastic Stack version, can take a long time due to image pulling.
+```bash
+kubectl wait --for=condition=available --timeout=10m \
+  deployment/elasticsearch \
+  deployment/kibana \
+  deployment/apm-server
+```
+
+Next, forward the kibana port:
+```bash
+kubectl port-forward service/kibana 5601:5601 &
+```
+
+Finally, you can view Kibana at http://localhost:5601/app/home#/
+
+If asked for a username and password, use username: elastic and password: elastic.
+
+Clean up when finished, like this:
+
+```bash
+kubectl delete -f k8s-manifest-elastic.yml
+```
diff --git a/k8s/k8s-manifest-elastic.yml b/k8s/k8s-manifest-elastic.yml
new file mode 100644
index 00000000..151192b4
--- /dev/null
+++ b/k8s/k8s-manifest-elastic.yml
@@ -0,0 +1,191 @@
+# This is a simple k8s manifest to start Elasticsearch, Kibana and APM server
+# with the same configuration as ../docker/docker-compose-elastic.yml
+#
+# For this reason, if trying to understand why a setting exists, look at the
+# docker variant first. Similarly, updates to the docker variant should happen
+# here as well.
+ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch +spec: + ports: + - port: 9200 + targetPort: 9200 + selector: + app: elasticsearch + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: elasticsearch +spec: + replicas: 1 + selector: + matchLabels: + app: elasticsearch + template: + metadata: + labels: + app: elasticsearch + spec: + containers: + - name: elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + ports: + - containerPort: 9200 + env: + - name: node.name + value: elasticsearch + - name: cluster.name + value: docker-cluster + - name: discovery.type + value: single-node + - name: ELASTIC_PASSWORD + value: elastic + - name: bootstrap.memory_lock + value: "true" + - name: xpack.security.enabled + value: "true" + - name: xpack.security.http.ssl.enabled + value: "false" + - name: xpack.security.transport.ssl.enabled + value: "false" + - name: xpack.license.self_generated.type + value: trial + # Note that ELSER is recommended to have 2GB, but it is JNI (PyTorch). + # ELSER's memory is in addition to the heap and other overhead. 
+ - name: ES_JAVA_OPTS + value: "-Xms2g -Xmx2g" + securityContext: + capabilities: + add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"] + drop: ["ALL"] + readinessProbe: + exec: + command: ["sh", "-c", "curl -s http://localhost:9200 | grep -q 'missing authentication credentials'"] + initialDelaySeconds: 5 + periodSeconds: 1 + timeoutSeconds: 10 + failureThreshold: 120 + +--- +apiVersion: v1 +kind: Service +metadata: + name: kibana +spec: + ports: + - port: 5601 + targetPort: 5601 + selector: + app: kibana + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana +spec: + replicas: 1 + selector: + matchLabels: + app: kibana + template: + metadata: + labels: + app: kibana + spec: + initContainers: + - name: setup-kibana-system-user + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + command: + - bash + - -xc + - | + echo "Setup the kibana_system password"; + until curl -s -u "elastic:elastic" -X POST http://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"elastic\"}" -H "Content-Type: application/json" | grep -q "^{}"; do sleep 5; done; + containers: + - name: kibana + image: docker.elastic.co/kibana/kibana:8.17.2 + ports: + - containerPort: 5601 + env: + - name: SERVERNAME + value: kibana + - name: ELASTICSEARCH_HOSTS + value: http://elasticsearch:9200 + - name: ELASTICSEARCH_USERNAME + value: kibana_system + - name: ELASTICSEARCH_PASSWORD + value: elastic + - name: MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED + value: "true" + - name: XPACK_SECURITY_ENCRYPTIONKEY + value: fhjskloppd678ehkdfdlliverpoolfcr + - name: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY + value: fhjskloppd678ehkdfdlliverpoolfcr + - name: SERVER_PUBLICBASEURL + value: http://127.0.0.1:5601 + readinessProbe: + exec: + command: ["sh", "-c", "curl -s http://localhost:5601/api/status | grep -q 'available'"] + initialDelaySeconds: 1 + periodSeconds: 1 + failureThreshold: 300 + +--- +apiVersion: v1 +kind: Service +metadata: + name: apm-server 
+spec: + ports: + - port: 8200 + targetPort: 8200 + selector: + app: apm-server + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: apm-server +spec: + replicas: 1 + selector: + matchLabels: + app: apm-server + template: + metadata: + labels: + app: apm-server + spec: + containers: + - name: apm-server + image: docker.elastic.co/apm/apm-server:8.17.2 + ports: + - containerPort: 8200 + env: + - name: apm-server.kibana.enabled + value: "true" + - name: apm-server.kibana.host + value: http://kibana:5601 + - name: apm-server.kibana.username + value: elastic + - name: apm-server.kibana.password + value: elastic + - name: output.elasticsearch.hosts + value: http://elasticsearch:9200 + - name: output.elasticsearch.username + value: elastic + - name: output.elasticsearch.password + value: elastic + readinessProbe: + tcpSocket: + port: 8200 + initialDelaySeconds: 1 + periodSeconds: 1 + failureThreshold: 300 From 74d3f310718d3b957e559ad652d7311630f7cf0f Mon Sep 17 00:00:00 2001 From: Adrian Cole Date: Wed, 26 Feb 2025 13:51:46 +0800 Subject: [PATCH 2/3] block Signed-off-by: Adrian Cole --- example-apps/chatbot-rag-app/README.md | 4 ++-- k8s/k8s-manifest-elastic.yml | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/example-apps/chatbot-rag-app/README.md b/example-apps/chatbot-rag-app/README.md index a67c2cc9..42dd303d 100644 --- a/example-apps/chatbot-rag-app/README.md +++ b/example-apps/chatbot-rag-app/README.md @@ -116,9 +116,9 @@ to follow logs on this stage: kubectl logs deployment.apps/chatbot-rag-app -c create-index -f ``` -Next, forward the kibana port: +Next, forward the web UI port: ```bash -kubectl port-forward service/kibana 5601:5601 & +kubectl port-forward deployment.apps/chatbot-rag-app 4000:4000 & ``` Clean up when finished, like this: diff --git a/k8s/k8s-manifest-elastic.yml b/k8s/k8s-manifest-elastic.yml index 151192b4..7a23452f 100644 --- a/k8s/k8s-manifest-elastic.yml +++ b/k8s/k8s-manifest-elastic.yml @@ -163,6 
+163,15 @@ spec: labels: app: apm-server spec: + initContainers: + - name: await-kibana + image: docker.elastic.co/elasticsearch/elasticsearch:8.17.2 + command: + - bash + - -xc + - | + echo "Waiting for kibana to be available"; + until curl -s http://kibana:5601/api/status | grep -q 'available'; do sleep 5; done; containers: - name: apm-server image: docker.elastic.co/apm/apm-server:8.17.2 From 8b199992fdbfd80faf719ddd02501fc4cd14e4c6 Mon Sep 17 00:00:00 2001 From: Anuraag Agrawal Date: Fri, 28 Feb 2025 10:45:27 +0900 Subject: [PATCH 3/3] Fix apm-server args --- k8s/k8s-manifest-elastic.yml | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/k8s/k8s-manifest-elastic.yml b/k8s/k8s-manifest-elastic.yml index 7a23452f..26eb10bc 100644 --- a/k8s/k8s-manifest-elastic.yml +++ b/k8s/k8s-manifest-elastic.yml @@ -172,26 +172,28 @@ spec: - | echo "Waiting for kibana to be available"; until curl -s http://kibana:5601/api/status | grep -q 'available'; do sleep 5; done; + echo "Found kibana"; containers: - name: apm-server image: docker.elastic.co/apm/apm-server:8.17.2 ports: - containerPort: 8200 - env: - - name: apm-server.kibana.enabled - value: "true" - - name: apm-server.kibana.host - value: http://kibana:5601 - - name: apm-server.kibana.username - value: elastic - - name: apm-server.kibana.password - value: elastic - - name: output.elasticsearch.hosts - value: http://elasticsearch:9200 - - name: output.elasticsearch.username - value: elastic - - name: output.elasticsearch.password - value: elastic + command: + - /usr/local/bin/docker-entrypoint + - -E + - setup.kibana.enabled=true + - -E + - setup.kibana.host=kibana:5601 + - -E + - setup.kibana.username=elastic + - -E + - setup.kibana.password=elastic + - -E + - output.elasticsearch.hosts=[elasticsearch:9200] + - -E + - output.elasticsearch.username=elastic + - -E + - output.elasticsearch.password=elastic readinessProbe: tcpSocket: port: 8200