From f45a0999546a42addfb671df42a35c7b052887a1 Mon Sep 17 00:00:00 2001 From: Damien Berezenko Date: Mon, 21 Apr 2025 10:57:45 -0500 Subject: [PATCH] r2r k8s manifests with kustomize --- .../kustomizations/helm-values_hatchet.yaml | 218 ++++++++++ .../helm-values_postgresql.yaml | 13 + .../kustomizations/include/cm-hatchet.yaml | 20 + .../include/cm-hatchet_OLD.yaml | 40 ++ .../include/cm-init-scripts-hatchet.yaml | 262 ++++++++++++ .../include/cm-init-scripts-r2r.yaml | 114 ++++++ .../k8s/kustomizations/include/cm-r2r.yaml | 61 +++ .../include/cm-unstructured.yaml | 12 + .../include/hatchet-dashboard-initc.yaml | 69 ++++ .../include/hatchet-engine-initc.yaml | 86 ++++ .../include/hatchet-init-job.yaml | 194 +++++++++ .../include/hatchet-rabbitmq-sts.yaml | 78 ++++ .../k8s/kustomizations/include/pgadmin.yaml | 42 ++ .../kustomizations/include/pgvector-sts.yaml | 98 +++++ .../include/r2r-dashboard-indep.yaml | 57 +++ .../include/r2r-graph-clustering-indep.yaml | 39 ++ .../k8s/kustomizations/include/r2r-initc.yaml | 153 +++++++ .../include/r2r-nginx-indep.yaml | 51 +++ .../include/unstructured-indep.yaml | 42 ++ .../k8s/kustomizations/kustomization.yaml | 135 +++++++ .../patches/hatchet-rabbitmq-sts.yaml | 40 ++ .../patches/rm-secret-hatchet-postgres.yaml | 5 + .../rm-secret-hatchet-rabbitmq-config.yaml | 5 + .../patches/rm-secret-hatchet-rabbitmq.yaml | 5 + .../rm-secret-hatchet-shared-config.yaml | 5 + .../k8s/kustomizations/patches/service.yaml | 10 + .../examples/externalsecret_hatchet.yaml | 143 +++++++ .../examples/externalsecret_r2r.yaml | 374 ++++++++++++++++++ .../k8s/manifests/examples/ingress-r2r.yaml | 56 +++ .../manifests/examples/secrets_hatchet.yaml | 47 +++ .../k8s/manifests/examples/secrets_r2r.yaml | 41 ++ 31 files changed, 2515 insertions(+) create mode 100644 deployment/k8s/kustomizations/helm-values_hatchet.yaml create mode 100644 deployment/k8s/kustomizations/helm-values_postgresql.yaml create mode 100644 
deployment/k8s/kustomizations/include/cm-hatchet.yaml create mode 100644 deployment/k8s/kustomizations/include/cm-hatchet_OLD.yaml create mode 100644 deployment/k8s/kustomizations/include/cm-init-scripts-hatchet.yaml create mode 100644 deployment/k8s/kustomizations/include/cm-init-scripts-r2r.yaml create mode 100644 deployment/k8s/kustomizations/include/cm-r2r.yaml create mode 100644 deployment/k8s/kustomizations/include/cm-unstructured.yaml create mode 100644 deployment/k8s/kustomizations/include/hatchet-dashboard-initc.yaml create mode 100644 deployment/k8s/kustomizations/include/hatchet-engine-initc.yaml create mode 100644 deployment/k8s/kustomizations/include/hatchet-init-job.yaml create mode 100644 deployment/k8s/kustomizations/include/hatchet-rabbitmq-sts.yaml create mode 100644 deployment/k8s/kustomizations/include/pgadmin.yaml create mode 100644 deployment/k8s/kustomizations/include/pgvector-sts.yaml create mode 100644 deployment/k8s/kustomizations/include/r2r-dashboard-indep.yaml create mode 100644 deployment/k8s/kustomizations/include/r2r-graph-clustering-indep.yaml create mode 100644 deployment/k8s/kustomizations/include/r2r-initc.yaml create mode 100644 deployment/k8s/kustomizations/include/r2r-nginx-indep.yaml create mode 100644 deployment/k8s/kustomizations/include/unstructured-indep.yaml create mode 100644 deployment/k8s/kustomizations/kustomization.yaml create mode 100644 deployment/k8s/kustomizations/patches/hatchet-rabbitmq-sts.yaml create mode 100644 deployment/k8s/kustomizations/patches/rm-secret-hatchet-postgres.yaml create mode 100644 deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq-config.yaml create mode 100644 deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq.yaml create mode 100644 deployment/k8s/kustomizations/patches/rm-secret-hatchet-shared-config.yaml create mode 100644 deployment/k8s/kustomizations/patches/service.yaml create mode 100644 deployment/k8s/manifests/examples/externalsecret_hatchet.yaml 
create mode 100644 deployment/k8s/manifests/examples/externalsecret_r2r.yaml create mode 100644 deployment/k8s/manifests/examples/ingress-r2r.yaml create mode 100644 deployment/k8s/manifests/examples/secrets_hatchet.yaml create mode 100644 deployment/k8s/manifests/examples/secrets_r2r.yaml diff --git a/deployment/k8s/kustomizations/helm-values_hatchet.yaml b/deployment/k8s/kustomizations/helm-values_hatchet.yaml new file mode 100644 index 000000000..dfad620b2 --- /dev/null +++ b/deployment/k8s/kustomizations/helm-values_hatchet.yaml @@ -0,0 +1,218 @@ +# sharedConfig is inherited by all backend services: api, grpc, controllers, scheduler +sharedConfig: + # you can disable shared config by setting this to false + enabled: true + + # these are the most commonly configured values + serverUrl: "http://localhost:8080" + serverAuthCookieDomain: "localhost:8080" # the domain for the auth cookie + serverAuthCookieInsecure: "t" # allows cookies to be set over http + serverAuthSetEmailVerified: "t" # automatically sets email_verified to true for all users + serverAuthBasicAuthEnabled: "t" # allows login via basic auth (email/password) + grpcBroadcastAddress: "localhost:7070" # the endpoint for the gRPC server, exposed via the `grpc` service + grpcInsecure: "true" # allows gRPC to be served over http +# defaultAdminEmail: "" # in exposed/production environments, change this to a valid email +# defaultAdminPassword: "" # in exposed/production environments, change this to a secure password + + # you can set additional environment variables here, which will override any defaults + env: {} + +api: + enabled: true + replicaCount: 2 + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-api" + tag: "v0.54.7" + pullPolicy: "Always" + migrationJob: + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-migrate" + serviceAccount: + create: true + name: hatchet-api + envFrom: + - secretRef: + name: hatchet-shared-config + ingress: + enabled: false + health: + enabled: true + 
spec: + livenessProbe: + httpGet: + path: /api/live + port: 8080 + periodSeconds: 5 + initialDelaySeconds: 60 + readinessProbe: + httpGet: + path: /api/ready + port: 8080 + periodSeconds: 5 + initialDelaySeconds: 20 + +grpc: + enabled: true + nameOverride: hatchet-grpc + fullnameOverride: hatchet-grpc + replicaCount: 1 + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-engine" + tag: "v0.54.7" + pullPolicy: "Always" + setupJob: + enabled: false + service: + externalPort: 7070 + internalPort: 7070 + commandline: + command: ["/hatchet/hatchet-engine"] + deployment: + annotations: + app.kubernetes.io/name: hatchet-grpc + serviceAccount: + create: true + name: hatchet-grpc + envFrom: + - secretRef: + name: hatchet-shared-config + ingress: + enabled: false + health: + enabled: true + spec: + livenessProbe: + httpGet: + path: /live + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 60 + readinessProbe: + httpGet: + path: /ready + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 20 + +controllers: + enabled: true + nameOverride: controllers + fullnameOverride: controllers + replicaCount: 1 + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-engine" + tag: "v0.54.7" + pullPolicy: "Always" + setupJob: + enabled: false + service: + externalPort: 7070 + internalPort: 7070 + commandline: + command: ["/hatchet/hatchet-engine"] + deployment: + annotations: + app.kubernetes.io/name: controllers + serviceAccount: + create: true + name: controllers + envFrom: + - secretRef: + name: hatchet-shared-config + ingress: + enabled: false + health: + enabled: true + spec: + livenessProbe: + httpGet: + path: /live + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 60 + readinessProbe: + httpGet: + path: /ready + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 20 + +scheduler: + enabled: true + nameOverride: scheduler + fullnameOverride: scheduler + replicaCount: 1 + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-engine" + tag: "v0.54.7" + 
pullPolicy: "Always" + setupJob: + enabled: false + service: + externalPort: 7070 + internalPort: 7070 + commandline: + command: ["/hatchet/hatchet-engine"] + deployment: + annotations: + app.kubernetes.io/name: scheduler + serviceAccount: + create: true + name: scheduler + envFrom: + - secretRef: + name: hatchet-shared-config + ingress: + enabled: false + health: + enabled: true + spec: + livenessProbe: + httpGet: + path: /live + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 60 + readinessProbe: + httpGet: + path: /ready + port: 8733 + periodSeconds: 5 + initialDelaySeconds: 20 + +frontend: + enabled: true + image: + repository: "ghcr.io/hatchet-dev/hatchet/hatchet-frontend" + tag: "v0.54.7" + pullPolicy: "Always" + service: + externalPort: 8080 + internalPort: 80 + ingress: + enabled: false + +postgres: + enabled: false + auth: +# username: "" +# password: "" + database: "hatchet" + tls: + enabled: false + primary: + service: + ports: + postgresql: 5432 + +rabbitmq: + enabled: true + auth: +# username: "" +# password: "" + service: + ports: + amqp: 5672 + +caddy: + enabled: false \ No newline at end of file diff --git a/deployment/k8s/kustomizations/helm-values_postgresql.yaml b/deployment/k8s/kustomizations/helm-values_postgresql.yaml new file mode 100644 index 000000000..fee10aa81 --- /dev/null +++ b/deployment/k8s/kustomizations/helm-values_postgresql.yaml @@ -0,0 +1,13 @@ +auth: + existingSecret: r2r-hatchet-secrets + secretKeys: + adminPasswordKey: HATCHET_DATABASE_POSTGRES_POSTGRES_PASSWORD + userPasswordKey: HATCHET_DATABASE_POSTGRES_PASSWORD + replicationPasswordKey: HATCHET_DATABASE_POSTGRES_REPLICA_PASSWORD + +#creates hatchet database +global: + storageClass: csi-sc + postgresql: + auth: + database: hatchet \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/cm-hatchet.yaml b/deployment/k8s/kustomizations/include/cm-hatchet.yaml new file mode 100644 index 000000000..5cb9c9017 --- /dev/null +++ 
b/deployment/k8s/kustomizations/include/cm-hatchet.yaml @@ -0,0 +1,20 @@ +--- +# hatchet-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hatchet-configmap + annotations: + argocd.argoproj.io/sync-wave: "-2" +data: + #New + HATCHET_CLIENT_TLS_STRATEGY: "none" + HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH: "134217728" + HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH: "134217728" + + HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF: "false" + HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CERT: "false" + HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_APIKEY: "false" + HATCHET_TENANT_ID: "707d0855-80ab-4e1f-a156-f1c4546cbf52" + RABBITMQ_URL: "http://hatchet-rabbitmq" + RABBITMQ_MGMT_PORT: "15672" \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/cm-hatchet_OLD.yaml b/deployment/k8s/kustomizations/include/cm-hatchet_OLD.yaml new file mode 100644 index 000000000..7d3b93413 --- /dev/null +++ b/deployment/k8s/kustomizations/include/cm-hatchet_OLD.yaml @@ -0,0 +1,40 @@ +--- +# hatchet-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hatchet-configmap + annotations: + argocd.argoproj.io/sync-wave: "-2" +data: +# DATABASE_POSTGRES_HOST: "hatchet-postgres" + DATABASE_POSTGRES_HOST: "ferretdb-postgres-documentdb" + DATABASE_POSTGRES_PORT: "5432" + SERVER_AUTH_COOKIE_INSECURE: "t" + SERVER_GRPC_BIND_ADDRESS: "0.0.0.0" + SERVER_GRPC_BROADCAST_ADDRESS: "hatchet-engine:7077" + SERVER_GRPC_INSECURE: "t" + SERVER_AUTH_COOKIE_DOMAIN: "https://r2r.mywebsite.com" + SERVER_URL: "http://hatchet-dashboard:80" + + HATCHET_DATABASE_POSTGRES_HOST: "ferretdb-postgres-documentdb" + HATCHET_DATABASE_POSTGRES_PORT: "5432" + SERVER_GRPC_PORT: "7077" + SERVER_GRPC_MAX_MSG_SIZE: "134217728" + + + HATCHET_DATABASE_POSTGRES_DB_NAME: "hatchet" + #SERVER_AUTH_COOKIE_DOMAIN: "http://host.docker.internal:${R2R_HATCHET_DASHBOARD_PORT:-7274}" + #SERVER_URL: "http://host.docker.internal:${R2R_HATCHET_DASHBOARD_PORT:-7274}" + HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_APIKEY: "false" + 
HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF: "false" + HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CERT: "false" + HATCHET_TENANT_ID: "707d0855-80ab-4e1f-a156-f1c4546cbf52" +# R2R_RABBITMQ_PORT: "5672" + RABBITMQ_MGMT_PORT: "15672" + RABBITMQ_URL: "http://hatchet-rabbitmq" + + #New + HATCHET_CLIENT_TLS_STRATEGY: "none" + HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH: "134217728" + HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH: "134217728" \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/cm-init-scripts-hatchet.yaml b/deployment/k8s/kustomizations/include/cm-init-scripts-hatchet.yaml new file mode 100644 index 000000000..82903bc44 --- /dev/null +++ b/deployment/k8s/kustomizations/include/cm-init-scripts-hatchet.yaml @@ -0,0 +1,262 @@ +# This file contains the initialization scripts used by the InitContainers in the Job manifests. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: hatchet-init-scripts +data: + create-db.sh: | + #!/bin/sh + set -e + echo 'Waiting for PostgreSQL to be ready...' + DATABASE_POSTGRES_HOST=${DATABASE_POSTGRES_HOST:-hatchet-postgres} + while ! pg_isready -h ${DATABASE_POSTGRES_HOST} -p ${DATABASE_POSTGRES_PORT} -U ${DATABASE_POSTGRES_USERNAME:-hatchet_user}; do + sleep 1 + done + echo 'PostgreSQL is ready, checking if database exists...' + if ! PGPASSWORD=${DATABASE_POSTGRES_PASSWORD:-hatchet_password} psql -h ${DATABASE_POSTGRES_HOST} -p ${DATABASE_POSTGRES_PORT} -U ${DATABASE_POSTGRES_USERNAME:-hatchet_user} -lqt | grep -qw ${DATABASE_POSTGRES_DB_NAME:-hatchet}; then + echo 'Database does not exist, creating it...' + PGPASSWORD=${DATABASE_POSTGRES_PASSWORD:-hatchet_password} createdb -h ${DATABASE_POSTGRES_HOST} -p ${DATABASE_POSTGRES_PORT} -U ${DATABASE_POSTGRES_USERNAME:-hatchet_user} -w ${DATABASE_POSTGRES_DB_NAME:-hatchet} + else + echo 'Database already exists, skipping creation.' + fi + + setup-config.sh: | + + echo '>>> Starting config creation process...' 
+ if [ "${HATCHET_CLIENT_TLS_STRATEGY}" = "none" ]; then + echo "HATCHET_CLIENT_TLS_STRATEGY is set to none, skipping certificate creation." + /hatchet/hatchet-admin quickstart --skip certs --generated-config-dir /hatchet/config --overwrite=${HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF:-false} + else + echo "HATCHET_CLIENT_TLS_STRATEGY is not none, creating certificates." + /hatchet/hatchet-admin quickstart --cert-dir /hatchet/certs --generated-config-dir /hatchet/config --overwrite=${HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF:-false} + fi + + setup-token.sh: | + #!/bin/sh + set -e + + echo '>>> Starting token creation process...' + # Attempt to create token and capture both stdout and stderr + TOKEN_OUTPUT=$(/hatchet/hatchet-admin token create --config /hatchet/config --tenant-id ${HATCHET_TENANT_ID:-00000000-0000-0000-0000-00000000} 2>&1) + # Extract the token (assuming it's the only part that looks like a JWT) + TOKEN=$(echo "$TOKEN_OUTPUT" | grep -Eo 'eyJ[A-Za-z0-9_-]*\.eyJ[A-Za-z0-9_-]*\.[A-Za-z0-9_-]*') + + if [ -z "$TOKEN" ]; then + echo 'Error: Failed to extract token. Full command output:' >&2 + echo "$TOKEN_OUTPUT" >&2 + exit 1 + fi + + echo "$TOKEN" > /tmp/hatchet_api_key + echo 'Token created and saved to /tmp/hatchet_api_key' + # Copy token to final destination + #mkdir -p /hatchet_api_key/ + echo -n "$TOKEN" > /hatchet_api_key/api_key.txt + echo '>>> Token copied to /hatchet_api_key/api_key.txt' + + # Verify token was copied correctly + if [ "$(cat /tmp/hatchet_api_key)" != "$(cat /hatchet_api_key/api_key.txt)" ]; then + echo 'Error: Token copy failed, files do not match' >&2 + echo 'Content of /tmp/hatchet_api_key:' + cat /tmp/hatchet_api_key + exit 1 + fi + + echo 'Hatchet API key has been saved successfully' + echo 'Token length:' ${#TOKEN} + echo 'Token (first 20 chars):' ${TOKEN:0:20} + echo 'Token structure:' $(echo $TOKEN | awk -F. '{print NF-1}') 'parts' + # Check each part of the token + for i in 1 2 3; do + PART=$(echo $TOKEN | cut -d. 
-f$i) + echo 'Part' $i 'length:' ${#PART} + echo 'Part' $i 'base64 check:' $(echo $PART | base64 -d >/dev/null 2>&1 && echo 'Valid' || echo 'Invalid') + done + # Final validation attempt + if ! echo $TOKEN | awk -F. '{print $2}' | base64 -d 2>/dev/null | jq . >/dev/null 2>&1; then + echo 'Warning: Token payload is not valid JSON when base64 decoded' >&2 + else + echo 'Token payload appears to be valid JSON' + fi + + # thsi relies on the Serviceaccount, Role & Bunding set up in k8s (Included) + inject-secret.sh: | + #!/bin/bash + set -e + + # Wait for required config files + MAX_WAIT=300 + WAIT_TIME=0 + CONFIG_FILES=("/hatchet/config/server.yaml" "/hatchet/config/database.yaml" "/hatchet_api_key/api_key.txt") + + while ! [[ -s "${CONFIG_FILES[0]}" && -s "${CONFIG_FILES[1]}" && -s "${CONFIG_FILES[2]}" ]]; do + (( WAIT_TIME >= MAX_WAIT )) && { echo "Timeout waiting for config files."; exit 1; } + echo "Waiting for config files to be created and not empty..."; sleep 10; (( WAIT_TIME += 10 )) + done + echo "Config files are ready." + + # Kubernetes API variables + NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + API_SERVER="https://kubernetes.default.svc:${KUBERNETES_SERVICE_PORT}" + + echo ">>> Processing secret: $2 in folder: $1. 
ALLOW_OVERRIDE: $3" + + update_secret() { + local DIR="$1" SECRET_NAME="$2" ALLOW_OVERRIDE="${3:-false}" + ALLOW_OVERRIDE=$(echo "$ALLOW_OVERRIDE" | tr '[:upper:]' '[:lower:]') + local -a key_value_pairs=() + + echo "Processing directory: $DIR"; ls -la "$DIR" + + for f in "$DIR"/*; do + [[ -f "$f" ]] || continue + key=$(basename "$f") + value=$(base64 "$f" | tr -d '\n') + key_value_pairs+=("\"$key\":\"$value\"") + echo "Found file: $f, key: $key" + done + + local json_data=$(printf '{%s}' "$(IFS=, ; echo "${key_value_pairs[*]}")") + local json_body + json_body=$(jq -n \ + --arg name "$SECRET_NAME" \ + --arg ns "$NAMESPACE" \ + --arg data "$json_data" \ + '{apiVersion:"v1", kind:"Secret", metadata:{name:$name, namespace:$ns}, data: ($data | fromjson)}') + + #echo "Validated JSON Body: $json_body" + + # Check if the secret exists + local response + local response_code + response_code=$(curl -s -o /dev/null -w "%{http_code}" --insecure --header "Authorization: Bearer ${TOKEN}" \ + "${API_SERVER}/api/v1/namespaces/${NAMESPACE}/secrets/${SECRET_NAME}") + + if [[ "$response_code" == "200" ]]; then + [[ "$ALLOW_OVERRIDE" == "true" || "$ALLOW_OVERRIDE" == "1" ]] || { + echo "ALLOW_OVERRIDE is false. Skipping update."; return; + } + echo "Updating existing secret: $SECRET_NAME" + response=$(curl -s -X PUT --insecure --header "Authorization: Bearer ${TOKEN}" --header "Content-Type: application/json" \ + --data "$json_body" "${API_SERVER}/api/v1/namespaces/${NAMESPACE}/secrets/${SECRET_NAME}") + else + echo "Creating new secret: $SECRET_NAME" + response=$(curl -s -X POST --insecure --header "Authorization: Bearer ${TOKEN}" --header "Content-Type: application/json" \ + --data "$json_body" "${API_SERVER}/api/v1/namespaces/${NAMESPACE}/secrets") + fi + # Remove sensitive data before printing. 
All withing data.[*]: "[REDACTED]" + echo "JSON:" + echo "$response" | jq '.data |= with_entries(.value="[REDACTED]")' + } + + update_secret "$1" "$2" "$3" + echo "Finished processing secret: $2 in folder: $1. ALLOW_OVERRIDE: $3" + exit 0 + + check-service.sh: | + #!/bin/sh + set -e + + while true; do + if wget -q -O - "${1}" > /dev/null 2>&1; then + echo "Service is reachable at ${1}" + break + else + echo "Service is not reachable at ${1}. Retrying in 10 seconds..." + sleep 10 + fi + done + + check-file.sh: | + #!/bin/sh + set -e + + while true; do + if [ -s "${1}" ]; then + echo "File ${1} exists and is not empty." + break + else + if [ -f "${1}" ]; then + echo "File ${1} exists but is empty." + else + echo "File ${1} does not exist." + fi + echo "Retrying in 10 seconds..." + sleep 10 + fi + done + nginx.conf: | + events { + worker_connections 2048; + use epoll; + multi_accept on; + } + + http { + # Required basic settings + include /etc/nginx/mime.types; + default_type application/octet-stream; + client_max_body_size 100M; + + # Logging settings + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /var/log/nginx/access.log main; + + # Connection optimization + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + + upstream r2r_backend { + least_conn; + server r2r:7272 max_fails=3 fail_timeout=30s; # Use service name instead of container names + keepalive 32; + } + + server { + listen 80; + server_name localhost; + + # Timeouts + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + + # Buffer settings + proxy_buffers 8 16k; + proxy_buffer_size 32k; + + location / { + proxy_pass http://r2r_backend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + 
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Retry settings + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504; + proxy_next_upstream_tries 3; + proxy_next_upstream_timeout 10s; + } + + location /health { + access_log off; + add_header 'Content-Type' 'application/json'; + return 200 '{"status":"healthy"}'; + } + + # Error responses + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } + } \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/cm-init-scripts-r2r.yaml b/deployment/k8s/kustomizations/include/cm-init-scripts-r2r.yaml new file mode 100644 index 000000000..1bd1a7ae2 --- /dev/null +++ b/deployment/k8s/kustomizations/include/cm-init-scripts-r2r.yaml @@ -0,0 +1,114 @@ +# This file contains the initialization scripts used by the InitContainers in the Job manifests. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: r2r-init-scripts +data: + + check-service.sh: | + #!/bin/sh + set -e + + while true; do + if wget -q -O - "${1}" > /dev/null 2>&1; then + echo "Service is reachable at ${1}" + break + else + echo "Service is not reachable at ${1}. Retrying in 10 seconds..." + sleep 10 + fi + done + + check-file.sh: | + #!/bin/sh + set -e + + while true; do + if [ -s "${1}" ]; then + echo "File ${1} exists and is not empty." + break + else + if [ -f "${1}" ]; then + echo "File ${1} exists but is empty." + else + echo "File ${1} does not exist." + fi + echo "Retrying in 10 seconds..." 
+ sleep 10 + fi + done + + nginx.conf: | + events { + worker_connections 2048; + use epoll; + multi_accept on; + } + + http { + # Required basic settings + include /etc/nginx/mime.types; + default_type application/octet-stream; + client_max_body_size 100M; + + # Logging settings + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /var/log/nginx/access.log main; + + # Connection optimization + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + + upstream r2r_backend { + least_conn; + server r2r:7272 max_fails=3 fail_timeout=30s; # Use service name instead of container names + keepalive 32; + } + + server { + listen 80; + server_name localhost; + + # Timeouts + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + + # Buffer settings + proxy_buffers 8 16k; + proxy_buffer_size 32k; + + location / { + proxy_pass http://r2r_backend; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Retry settings + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504; + proxy_next_upstream_tries 3; + proxy_next_upstream_timeout 10s; + } + + location /health { + access_log off; + add_header 'Content-Type' 'application/json'; + return 200 '{"status":"healthy"}'; + } + + # Error responses + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } + } \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/cm-r2r.yaml b/deployment/k8s/kustomizations/include/cm-r2r.yaml new file mode 100644 index 000000000..cb679ce4c --- /dev/null +++ 
b/deployment/k8s/kustomizations/include/cm-r2r.yaml @@ -0,0 +1,61 @@ +# r2r-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: r2r-configmap + annotations: + argocd.argoproj.io/sync-wave: "-2" +data: +# POSTGRES_HOST: "postgres" + R2R_POSTGRES_HOST: "r2r-documentdb" + R2R_POSTGRES_PORT: "5432" +# POSTGRES_PORT: "5432" + R2R_POSTGRES_DBNAME: "r2r" + R2R_PROJECT_NAME: "r2r_default" + R2R_HOST: "0.0.0.0" + R2R_PORT: "7272" + R2R_LOG_LEVEL: INFO + + PYTHONUNBUFFERED: "1" + R2R_CONFIG_NAME: "full" +# R2R_CONFIG_PATH: "/app/r2r.toml" +# R2R_CONFIG_TOML: "/app/r2r.toml" + TELEMETRY_ENABLED: "false" + R2R_POSTGRES_PROJECT_NAME: "r2r_default" + R2R_POSTGRES_MAX_CONNECTIONS: "1024" + R2R_POSTGRES_STATEMENT_CACHE_SIZE: "100" + NEXT_PUBLIC_R2R_DEPLOYMENT_URL: "http://r2r:7272" + NEXT_PUBLIC_HATCHET_DASHBOARD_URL: "http://hatchet-dashboard:80" + R2R_DASHBOARD_PORT: "3000" + R2R_NGINX_PORT: "80" + R2R_HATCHET_DASHBOARD_PORT: "80" + + PGADMIN_ENABLE_TLS: "false" + + + # API Base URLs + OPENAI_API_BASE: "https://litellm.mywebsite.com/v1" + LITELLM_PROXY_API_BASE: "https://litellm.mywebsite.com/v1" + LITELLM_PROXY_API_URL: "https://litellm.mywebsite.com/v1" + HUGGINGFACE_API_BASE: "https://hf-tei.mywebsite.com" + + + AZURE_FOUNDRY_API_ENDPOINT: "" + AZURE_API_BASE: "" + AZURE_API_VERSION: "" + VERTEX_PROJECT: "" + VERTEX_LOCATION: "" + AWS_REGION_NAME: "" + OLLAMA_API_BASE: "" +# OLLAMA_API_BASE: "http://host.docker.internal:11434" + LM_STUDIO_API_BASE: "" + + CLUSTERING_SERVICE_URL: "http://r2r-graph-clustering:7276" # Graphologic + + R2R_SENTRY_DSN: "" + R2R_SENTRY_ENVIRONMENT: "" + R2R_SENTRY_TRACES_SAMPLE_RATE: "" + R2R_SENTRY_PROFILES_SAMPLE_RATE: "" + GOOGLE_REDIRECT_URI: "" + GITHUB_REDIRECT_URI: "" + diff --git a/deployment/k8s/kustomizations/include/cm-unstructured.yaml b/deployment/k8s/kustomizations/include/cm-unstructured.yaml new file mode 100644 index 000000000..d80018810 --- /dev/null +++ b/deployment/k8s/kustomizations/include/cm-unstructured.yaml @@ 
-0,0 +1,12 @@ +--- +# unstructured-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: unstructured-configmap + annotations: + argocd.argoproj.io/sync-wave: "-2" +data: + UNSTRUCTURED_SERVICE_URL: "http://unstructured:7275" + UNSTRUCTURED_NUM_WORKERS: "10" + UNSTRUCTURED_API_URL: "https://api.unstructured.io/general/v0/general" diff --git a/deployment/k8s/kustomizations/include/hatchet-dashboard-initc.yaml b/deployment/k8s/kustomizations/include/hatchet-dashboard-initc.yaml new file mode 100644 index 000000000..6d9bdd7fd --- /dev/null +++ b/deployment/k8s/kustomizations/include/hatchet-dashboard-initc.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: hatchet-dashboard +spec: + selector: + app: hatchet-dashboard + ports: + - port: 80 + targetPort: 80 + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hatchet-dashboard + annotations: + argocd.argoproj.io/sync-wave: "30" +spec: + replicas: 1 + selector: + matchLabels: + app: hatchet-dashboard + template: + metadata: + labels: + app: hatchet-dashboard + spec: +# initContainers: +# - name: wait-for-config-files +# image: busybox:1.37.0 +# command: +# - /bin/sh +# - -c +# - | +# # Wait for config files to be generated by hatchet-init-job and pushed into Secret and be not empty. +# sh /init/check-file.sh /hatchet/config/server.yaml +# sh /init/check-file.sh /hatchet/config/database.yaml +# echo "Config files are ready." 
+# volumeMounts: +# - mountPath: /init +# name: init-scripts +# - name: config-volume +# mountPath: /hatchet/config + containers: + - name: hatchet-dashboard + image: ghcr.io/hatchet-dev/hatchet/hatchet-dashboard:v0.54.4 + command: ["sh", "./entrypoint.sh", "--config", "/hatchet/config"] + ports: + - containerPort: 80 + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: hatchet-shared-config + key: DATABASE_URL + envFrom: + - secretRef: + name: hatchet-config + - secretRef: + name: hatchet-shared-config + + volumes: + - configMap: + defaultMode: 493 + name: hatchet-init-scripts + name: init-scripts + diff --git a/deployment/k8s/kustomizations/include/hatchet-engine-initc.yaml b/deployment/k8s/kustomizations/include/hatchet-engine-initc.yaml new file mode 100644 index 000000000..44226459b --- /dev/null +++ b/deployment/k8s/kustomizations/include/hatchet-engine-initc.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: hatchet-engine +spec: + selector: + app: hatchet-engine + ports: + - port: 7077 + targetPort: 7077 + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hatchet-engine + annotations: + argocd.argoproj.io/sync-wave: "30" +spec: + replicas: 1 + selector: + matchLabels: + app: hatchet-engine + template: + metadata: + labels: + app: hatchet-engine + spec: + initContainers: + - name: wait-for-config-files + image: busybox:1.37.0 + command: + - /bin/sh + - -c + - | + # Wait for config files to be generated by hatchet-init-job and pushed into Secret and be not empty. + sh /init/check-file.sh /hatchet/config/server.yaml + sh /init/check-file.sh /hatchet/config/database.yaml + echo "Config files are ready." 
+ volumeMounts: + - mountPath: /init + name: init-scripts + - name: config-volume + mountPath: /hatchet/config + containers: + - name: hatchet-engine + image: ghcr.io/hatchet-dev/hatchet/hatchet-engine:v0.54.4 + command: ["/hatchet/hatchet-engine", "--config", "/hatchet/config"] + ports: + - containerPort: 7077 + envFrom: + - secretRef: + name: hatchet-secrets + - configMapRef: + name: hatchet-configmap + livenessProbe: + exec: + command: ["wget", "-q", "-O", "-", "http://localhost:8733/live"] + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + readinessProbe: + exec: + command: ["wget", "-q", "-O", "-", "http://localhost:8733/live"] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + volumeMounts: + - name: certs-volume + mountPath: /hatchet/certs + - name: config-volume + mountPath: /hatchet/config + volumes: + - configMap: + defaultMode: 493 + name: hatchet-init-scripts + name: init-scripts + - name: certs-volume + secret: + secretName: r2r-hatchet-gen-cert-files + - name: config-volume + secret: + secretName: r2r-hatchet-gen-conf-files diff --git a/deployment/k8s/kustomizations/include/hatchet-init-job.yaml b/deployment/k8s/kustomizations/include/hatchet-init-job.yaml new file mode 100644 index 000000000..58950ea2c --- /dev/null +++ b/deployment/k8s/kustomizations/include/hatchet-init-job.yaml @@ -0,0 +1,194 @@ +apiVersion: batch/v1 +kind: Job +metadata: + #generate a unique name for the job + #generateName: hatchet-init-job- + name: hatchet-init-job +spec: + template: + spec: + restartPolicy: Never + serviceAccountName: hatchet-job-sa + + containers: + - name: minimal-job-container + image: busybox:1.37.0 + command: ["sh", "-c", "echo", "All init Jobs are completed"] + + initContainers: + + - name: i01-hatchet-create-db + image: postgres:17.2-alpine3.21 + envFrom: + #DATABASE_URL + #DATABASE_POSTGRES_HOST + #DATABASE_POSTGRES_PORT + #DATABASE_POSTGRES_USERNAME + 
#DATABASE_POSTGRES_PASSWORD + #DATABASE_POSTGRES_DB_NAME + - secretRef: + name: hatchet-shared-config + volumeMounts: + - mountPath: /init/create-db.sh + name: init-scripts + subPath: create-db.sh + command: ["/bin/sh"] + args: + - -c + - | + sh /init/create-db.sh || exit 1 + echo "Job completed successfully: Database created" + exit 0 + + - name: i02-hatchet-migration + image: ghcr.io/hatchet-dev/hatchet/hatchet-migrate:v0.54.4 + envFrom: + #DATABASE_URL + - secretRef: + name: hatchet-shared-config + + - name: i03-hatchet-setup + image: ghcr.io/hatchet-dev/hatchet/hatchet-admin:v0.54.4 + envFrom: + #DATABASE_URL + #DATABASE_POSTGRES_PORT + #DATABASE_POSTGRES_HOST + #DATABASE_POSTGRES_USERNAME + #DATABASE_POSTGRES_PASSWORD + #DATABASE_POSTGRES_DB_NAME + #SERVER_TASKQUEUE_RABBITMQ_URL + #SERVER_AUTH_COOKIE_DOMAIN + #SERVER_URL + #SERVER_AUTH_COOKIE_INSECURE + #SERVER_GRPC_BIND_ADDRESS + #SERVER_GRPC_INSECURE + #SERVER_GRPC_BROADCAST_ADDRESS + #SERVER_GRPC_MAX_MSG_SIZE + - secretRef: + name: hatchet-shared-config + #HATCHET_CLIENT_TLS_STRATEGY + #HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF + #HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_APIKEY + #HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CERT + #HATCHET_TENANT_ID + #HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH + #HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH + #RABBITMQ_URL + #RABBITMQ_MGMT_PORT + - configMapRef: + name: hatchet-configmap + command: ["/bin/bash"] + args: + - -c + - | + apk add -q --no-interactive curl jq + # Wait for the volumes to be mounted and files to be present + sleep 5 + + # Wait for RabbitMQ to be ready. Check if management port is open. + sh /init/check-service.sh ${RABBITMQ_URL:-http://hatchet-rabbitmq}:${RABBITMQ_MGMT_PORT:-15672} + + #in case the secrets do not exists, create the directories + echo "Preparing /hatchet_api_key and /hatchet/config directories..." + mkdir -p /hatchet_api_key-cm /hatchet/certs-cm /hatchet/config-cm + mkdir -p /hatchet_api_key /hatchet/certs /hatchet/config + cp -r /hatchet_api_key-cm/. 
/hatchet_api_key/ + cp -r /hatchet/certs-cm/. /hatchet/certs/ + cp -r /hatchet/config-cm/. /hatchet/config/ + #chmod 666 -R /hatchet_api_key + #chmod 666 -R /hatchet/certs + #chmod 666 -R /hatchet/config + + #Generate Config + bash /init/setup-config.sh || exit 1 + echo "Job completed successfully: Config created." + + #Generate Token + bash /init/setup-token.sh || exit 1 + echo "Job completed successfully: Token created." + + #Push Config and Token into k8s Secrets + bash /init/inject-secret.sh "/hatchet_api_key" "r2r-hatchet-gen-conf-api" "${HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_APIKEY:-false}" || exit 1 + echo "Job completed successfully: Token file is processed for k8s Secrets." + + bash /init/inject-secret.sh "/hatchet/config" "r2r-hatchet-gen-conf-files" "${HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CONF:-false}" || exit 1 + echo "Job completed successfully: Config files are processed for k8s Secrets." + + #Push Certificates into k8s Secrets + if [ "${HATCHET_CLIENT_TLS_STRATEGY}" = "none" ]; then + echo ">>> HATCHET_CLIENT_TLS_STRATEGY is set to none, skipping certificate processing for k8s Secrets." + else + bash /init/inject-secret.sh "/hatchet/certs" "r2r-hatchet-gen-cert-files" "${HATCHET_ADMIN_INIT_ALLOW_OVERRIDE_CERT:-false}" || exit 1 + echo "Job completed successfully: Certificate files are processed for k8s Secrets." 
+ fi + + exit 0 + volumeMounts: + - name: init-scripts + mountPath: /init + + - name: hatchet-api-key + mountPath: /hatchet_api_key-cm + - name: certs-volume + mountPath: /hatchet/certs-cm + - name: config-volume + mountPath: /hatchet/config-cm + + volumes: + - name: init-scripts + configMap: + defaultMode: 0755 + name: hatchet-init-scripts + - name: hatchet-api-key + secret: + defaultMode: 0644 + secretName: r2r-hatchet-gen-conf-api + optional: true + - name: certs-volume + secret: + #stat -c "%a %n" * + defaultMode: 0644 + secretName: r2r-hatchet-gen-cert-files + optional: true + - name: config-volume + secret: + defaultMode: 0644 + secretName: r2r-hatchet-gen-conf-files + optional: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hatchet-job-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: hatchet-secret-writer +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["update", "patch", "get"] + resourceNames: ["r2r-hatchet-gen-conf-api", "r2r-hatchet-gen-conf-files", "r2r-hatchet-gen-cert-files"] +# - apiGroups: [""] +# resources: ["secrets"] +# verbs: ["delete"] +# resourceNames: ["r2r-hatchet-gen-conf-api", "r2r-hatchet-gen-conf-files", "r2r-hatchet-gen-cert-files"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +# - apiGroups: [""] +# resources: ["secrets"] +# verbs: ["watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: hatchet-secret-writer-binding +subjects: + - kind: ServiceAccount + name: hatchet-job-sa +roleRef: + kind: Role + name: hatchet-secret-writer + apiGroup: rbac.authorization.k8s.io diff --git a/deployment/k8s/kustomizations/include/hatchet-rabbitmq-sts.yaml b/deployment/k8s/kustomizations/include/hatchet-rabbitmq-sts.yaml new file mode 100644 index 000000000..e81c39785 --- /dev/null +++ b/deployment/k8s/kustomizations/include/hatchet-rabbitmq-sts.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: 
+ name: hatchet-rabbitmq +spec: + serviceName: "hatchet-rabbitmq" + replicas: 1 + selector: + matchLabels: + app: hatchet-rabbitmq + template: + metadata: + labels: + app: hatchet-rabbitmq + spec: + hostname: hatchet-rabbitmq + containers: + - name: hatchet-rabbitmq + image: "rabbitmq:3.13.7-management-alpine" + ports: + - containerPort: 5672 + name: amqp + - containerPort: 15672 + name: management + env: + - name: RABBITMQ_DEFAULT_USER + valueFrom: + secretKeyRef: + name: hatchet-secrets + key: RABBITMQ_DEFAULT_USER + - name: RABBITMQ_DEFAULT_PASS + valueFrom: + secretKeyRef: + name: hatchet-secrets + key: RABBITMQ_DEFAULT_PASS + volumeMounts: + - name: rabbitmq-data + mountPath: /var/lib/rabbitmq + - name: rabbitmq-my-conf + mountPath: /etc/rabbitmq/conf.d/myrabbitmq.conf + subPath: myrabbitmq.conf + livenessProbe: + exec: + command: ["rabbitmqctl", "status"] + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 10 + failureThreshold: 5 + volumes: + - name: rabbitmq-my-conf + configMap: + name: hatchet-configmap + volumeClaimTemplates: + - metadata: + name: rabbitmq-data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: csi-sc + resources: + requests: + storage: 5Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: hatchet-rabbitmq +spec: + clusterIP: None + selector: + app: hatchet-rabbitmq + ports: + - port: 5672 + targetPort: 5672 + name: amqp + - port: 15672 + targetPort: 15672 + name: management diff --git a/deployment/k8s/kustomizations/include/pgadmin.yaml b/deployment/k8s/kustomizations/include/pgadmin.yaml new file mode 100644 index 000000000..f59ffafa0 --- /dev/null +++ b/deployment/k8s/kustomizations/include/pgadmin.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pgadmin +spec: + replicas: 1 + selector: + matchLabels: + app: pgadmin + template: + metadata: + labels: + app: pgadmin + spec: + containers: + - name: pgadmin + image: dpage/pgadmin4:8.14.0 + ports: + - containerPort: 80 + env: + - 
name: PGADMIN_DEFAULT_EMAIL + valueFrom: + secretKeyRef: + name: pgadmin-secrets + key: PGADMIN_DEFAULT_EMAIL + - name: PGADMIN_DEFAULT_PASSWORD + valueFrom: + secretKeyRef: + name: pgadmin-secrets + key: PGADMIN_DEFAULT_PASSWORD +--- +apiVersion: v1 +kind: Service +metadata: + name: pgadmin +spec: + type: NodePort + selector: + app: pgadmin + ports: + - port: 80 + targetPort: 80 \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/pgvector-sts.yaml b/deployment/k8s/kustomizations/include/pgvector-sts.yaml new file mode 100644 index 000000000..c885cc930 --- /dev/null +++ b/deployment/k8s/kustomizations/include/pgvector-sts.yaml @@ -0,0 +1,98 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: r2r-pgvector +spec: + serviceName: "r2r-pgvector" + replicas: 1 + selector: + matchLabels: + app: r2r-pgvector + template: + metadata: + labels: + app: r2r-pgvector + spec: + # Run the container as the non-root "postgres" user (UID 999) to prevent running as root. 
+ securityContext: + runAsUser: 999 + fsGroup: 999 + containers: + - name: r2r-pgvector + image: pgvector/pgvector:0.8.0-pg17 + command: + - postgres + - -c + - "max_connections=1024" + env: + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: r2r-secrets + key: R2R_POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: r2r-secrets + key: R2R_POSTGRES_PASSWORD +# - name: POSTGRES_HOST +# valueFrom: +# configMapKeyRef: +# name: r2r-configmap +# key: R2R_POSTGRES_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: R2R_POSTGRES_PORT + - name: POSTGRES_MAX_CONNECTIONS + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: R2R_POSTGRES_MAX_CONNECTIONS + - name: PGPORT + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: R2R_POSTGRES_PORT + ports: + - containerPort: 5432 + name: r2r-pgvector + volumeMounts: + - name: postgres-data + mountPath: /var/lib/postgresql/data + #livenessProbe: + # exec: + # command: + # - "pg_isready" + # - "-U" + # - "${POSTGRES_USER}" + # initialDelaySeconds: 10 + # timeoutSeconds: 5 + # periodSeconds: 10 + # failureThreshold: 5 + volumeClaimTemplates: + - metadata: + name: postgres-data + spec: + accessModes: + - ReadWriteOnce + storageClassName: csi-sc + resources: + requests: + storage: 5Gi +--- +# filepath: /manifests/postgres-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: r2r-pgvector +spec: + clusterIP: None + selector: + app: r2r-pgvector + ports: + - port: 5432 + targetPort: 5432 + name: r2r-pgvector \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/r2r-dashboard-indep.yaml b/deployment/k8s/kustomizations/include/r2r-dashboard-indep.yaml new file mode 100644 index 000000000..5738160a2 --- /dev/null +++ b/deployment/k8s/kustomizations/include/r2r-dashboard-indep.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: r2r-dashboard +spec: + replicas: 1 + selector: + matchLabels: + 
app: r2r-dashboard + template: + metadata: + labels: + app: r2r-dashboard + spec: + containers: + - name: r2r-dashboard + image: emrgntcmplxty/r2r-dashboard:1.0.1 + ports: + - containerPort: 3000 + env: + - name: NEXT_PUBLIC_R2R_DEPLOYMENT_URL + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: NEXT_PUBLIC_R2R_DEPLOYMENT_URL + - name: NEXT_PUBLIC_HATCHET_DASHBOARD_URL + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: NEXT_PUBLIC_HATCHET_DASHBOARD_URL + # Optionally add a liveness/readiness probe as needed. + # For example: + # livenessProbe: + # httpGet: + # path: /live + # port: 3000 + # initialDelaySeconds: 10 + # periodSeconds: 10 + # readinessProbe: + # httpGet: + # path: /ready + # port: 3000 + # initialDelaySeconds: 5 + # periodSeconds: 10 +--- +apiVersion: v1 +kind: Service +metadata: + name: r2r-dashboard +spec: + selector: + app: r2r-dashboard + ports: + - port: 3000 # External port from docker-compose ${R2R_DASHBOARD_PORT:-7273} + targetPort: 3000 # Container port as set in docker-compose + type: ClusterIP diff --git a/deployment/k8s/kustomizations/include/r2r-graph-clustering-indep.yaml b/deployment/k8s/kustomizations/include/r2r-graph-clustering-indep.yaml new file mode 100644 index 000000000..6fbf50f7a --- /dev/null +++ b/deployment/k8s/kustomizations/include/r2r-graph-clustering-indep.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: r2r-graph-clustering +spec: + replicas: 1 + selector: + matchLabels: + app: r2r-graph-clustering + template: + metadata: + labels: + app: r2r-graph-clustering + spec: + containers: + - name: r2r-graph-clustering + image: ragtoriches/cluster-prod:latest + ports: + - containerPort: 7276 + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost:7276/health"] + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: r2r-graph-clustering +spec: + type: NodePort + selector: + 
app: r2r-graph-clustering + ports: + - port: 7276 + targetPort: 7276 diff --git a/deployment/k8s/kustomizations/include/r2r-initc.yaml b/deployment/k8s/kustomizations/include/r2r-initc.yaml new file mode 100644 index 000000000..237277648 --- /dev/null +++ b/deployment/k8s/kustomizations/include/r2r-initc.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: r2r + annotations: + argocd.argoproj.io/sync-wave: "30" + +spec: + replicas: 1 + selector: + matchLabels: + app: r2r + template: + metadata: + labels: + app: r2r + spec: + initContainers: + - name: wait-for-configs-and-services + image: busybox:1.37.0 + command: + - /bin/sh + - -c + - | + # Wait for /app/r2r.toml and /hatchet_api_key/api_key.txt to exist and be not empty. + sh /init/check-file.sh /app/r2r.toml + echo "Config file is ready." + #sh /init/check-file.sh /hatchet_api_key/api_key.txt + #echo "API key is ready." + + UNSTRUCTURED_HEALTH_URL=${UNSTRUCTURED_SERVICE_URL:-http://unstructured:7275}"/health" + echo "Checking health of the Unstructured service at: ${UNSTRUCTURED_HEALTH_URL}..." + sh /init/check-service.sh $UNSTRUCTURED_HEALTH_URL + + GRAPHCLUSTER_HEALTH_URL=${CLUSTERING_SERVICE_URL:-http://r2r-graph-clustering:7276}"/health" + echo "Checking health of the Graph-Clustering service at: ${GRAPHCLUSTER_HEALTH_URL}..." 
+ sh /init/check-service.sh $GRAPHCLUSTER_HEALTH_URL + + env: + - name: CLUSTERING_SERVICE_URL + valueFrom: + configMapKeyRef: + name: r2r-configmap + key: CLUSTERING_SERVICE_URL + - name: UNSTRUCTURED_SERVICE_URL + valueFrom: + configMapKeyRef: + name: unstructured-configmap + key: UNSTRUCTURED_SERVICE_URL + volumeMounts: + - mountPath: /init + name: init-scripts +# - name: hatchet-api-key +# mountPath: /hatchet_api_key +# readOnly: true + - name: r2r-toml + mountPath: /app/r2r.toml + subPath: r2r.toml + readOnly: true + containers: + - name: r2r + image: "ragtoriches/prod:3.3.32" + command: + - sh + - -c + - | + #!/bin/sh + sleep 10 + if [ -z "${HATCHET_CLIENT_TOKEN}" ]; then + export HATCHET_CLIENT_TOKEN=$(cat /hatchet_api_key/api_key.txt) + fi + exec uvicorn core.main.app_entry:app --host ${R2R_HOST} --port ${R2R_PORT} + ports: + - containerPort: 7272 + envFrom: + - configMapRef: + name: unstructured-configmap + - configMapRef: + name: r2r-configmap + - secretRef: + name: r2r-secrets + env: + - name: HATCHET_CLIENT_TOKEN + valueFrom: + secretKeyRef: + name: hatchet-client-config + key: HATCHET_CLIENT_TOKEN + optional: true + - name: HATCHET_CLIENT_TLS_STRATEGY + valueFrom: + configMapKeyRef: + name: hatchet-configmap + key: HATCHET_CLIENT_TLS_STRATEGY + - name: HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH + valueFrom: + configMapKeyRef: + name: hatchet-configmap + key: HATCHET_CLIENT_GRPC_MAX_RECV_MESSAGE_LENGTH + - name: HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH + valueFrom: + configMapKeyRef: + name: hatchet-configmap + key: HATCHET_CLIENT_GRPC_MAX_SEND_MESSAGE_LENGTH + #livenessProbe: + # httpGet: + # path: /v3/health + # port: 7272 + # initialDelaySeconds: 60 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 5 + volumeMounts: +# - name: hatchet-api-key +# mountPath: /hatchet_api_key +# subPath: api_key.txt +# readOnly: true + - name: r2r-toml + mountPath: /app/r2r.toml + subPath: r2r.toml + readOnly: true + volumes: + - configMap: + 
defaultMode: 493 + name: r2r-init-scripts + name: init-scripts + - name: r2r-toml + secret: + defaultMode: 0455 + items: + - key: r2r.toml + path: r2r.toml + secretName: r2r-files +# - name: hatchet-api-key +# secret: +# defaultMode: 0755 +# items: +# - key: HATCHET_CLIENT_TOKEN +# path: api_key.txt +# secretName: hatchet-client-config +--- +# filepath: /manifests/r2r-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: r2r +spec: + selector: + app: r2r + ports: + - port: 7272 + targetPort: 7272 + type: ClusterIP \ No newline at end of file diff --git a/deployment/k8s/kustomizations/include/r2r-nginx-indep.yaml b/deployment/k8s/kustomizations/include/r2r-nginx-indep.yaml new file mode 100644 index 000000000..c15502513 --- /dev/null +++ b/deployment/k8s/kustomizations/include/r2r-nginx-indep.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: r2r-nginx +spec: + replicas: 1 + selector: + matchLabels: + app: r2r-nginx + template: + metadata: + labels: + app: r2r-nginx + spec: + containers: + - name: r2r-nginx + image: nginx:1.27.3-alpine3.20-slim + ports: + - containerPort: 80 + volumeMounts: + - name: nginx-conf-volume + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost/health"] + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + resources: + limits: + cpu: "0.5" + memory: "512Mi" + volumes: + - name: nginx-conf-volume + configMap: + name: r2r-init-scripts +--- +apiVersion: v1 +kind: Service +metadata: + name: r2r-nginx +spec: + type: NodePort + selector: + app: r2r-nginx + ports: + - port: 80 + targetPort: 80 diff --git a/deployment/k8s/kustomizations/include/unstructured-indep.yaml b/deployment/k8s/kustomizations/include/unstructured-indep.yaml new file mode 100644 index 000000000..9a7f657b7 --- /dev/null +++ b/deployment/k8s/kustomizations/include/unstructured-indep.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: unstructured +spec: + replicas: 1 + selector: + matchLabels: + app: unstructured + template: + metadata: + labels: + app: unstructured + spec: + containers: + - name: unstructured + image: ragtoriches/unst-prod + envFrom: + - configMapRef: + name: unstructured-configmap + ports: + - containerPort: 7275 + livenessProbe: + exec: + command: ["curl", "-f", "http://localhost:7275/health"] + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: unstructured +spec: + type: NodePort + selector: + app: unstructured + ports: + - port: 7275 + targetPort: 7275 diff --git a/deployment/k8s/kustomizations/kustomization.yaml b/deployment/k8s/kustomizations/kustomization.yaml new file mode 100644 index 000000000..675acd91b --- /dev/null +++ b/deployment/k8s/kustomizations/kustomization.yaml @@ -0,0 +1,135 @@ +# kustomize build deployment/k8s/kustomizations --enable-helm > deployment/k8s/kustomizations/r2r.kustimized.yaml + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: ai-system + +images: +# #https://hub.docker.com/r/dpage/pgadmin4/tags +# - name: dpage/pgadmin4 +# newTag: 8.14.0 +# #https://hub.docker.com/_/alpine/tags?name=3.2 +# - name: alpine +# newTag: 3.21.2 + #https://hub.docker.com/_/busybox/tags?name=1.3 + - name: busybox + newTag: 1.37.0 + #https://hub.docker.com/_/nginx/tags?name=1.27 + - name: nginx + newTag: 1.27.3-alpine3.20-slim + + #https://github.com/SciPhi-AI/R2R-Dashboard/blob/main/Dockerfile + #https://hub.docker.com/r/emrgntcmplxty/r2r-dashboard/tags + - name: emrgntcmplxty/r2r-dashboard + newTag: 1.0.0 + #https://hub.docker.com/r/ragtoriches/prod/tags?name=3. 
+ - name: ragtoriches/prod + newTag: 3.4.0 + #https://hub.docker.com/r/ragtoriches/cluster-prod/tags + - name: ragtoriches/cluster-prod + newTag: latest + #https://github.com/SciPhi-AI/R2R/tree/main/services/unstructured + #https://hub.docker.com/r/ragtoriches/unst-prod/tags + - name: ragtoriches/unst-prod + newTag: latest + + #ghcr.io/hatchet-dev/hatchet/hatchet-dashboard + - name: ghcr.io/hatchet-dev/hatchet/hatchet-dashboard + newTag: v0.54.7 + #ghcr.io/hatchet-dev/hatchet/hatchet-engine + - name: ghcr.io/hatchet-dev/hatchet/hatchet-engine + newTag: v0.54.7 + #ghcr.io/hatchet-dev/hatchet/hatchet-admin + - name: ghcr.io/hatchet-dev/hatchet/hatchet-admin + newTag: v0.54.7 + #ghcr.io/hatchet-dev/hatchet/hatchet-migrate + - name: ghcr.io/hatchet-dev/hatchet/hatchet-migrate + newTag: v0.54.7 + #ghcr.io/hatchet-dev/hatchet/hatchet-api + - name: ghcr.io/hatchet-dev/hatchet/hatchet-api + newTag: v0.54.7 + #ghcr.io/hatchet-dev/hatchet/hatchet-frontend + - name: ghcr.io/hatchet-dev/hatchet/hatchet-frontend + newTag: v0.54.7 + + #https://hub.docker.com/r/bitnami/rabbitmq/tags?name=3. + - name: docker.io/bitnami/rabbitmq + newTag: 3.12.14-debian-12-r7 + + #https://hub.docker.com/_/postgres/tags?name=17. 
+ - name: postgres + newTag: 0.8.0-pg16 + newName: pgvector/pgvector + #https://hub.docker.com/r/pgvector/pgvector/tags?name=pg17 +# - name: pgvector/pgvector +# newTag: 0.8.0-pg17 + +resources: + - include/cm-hatchet.yaml + - include/cm-r2r.yaml + - include/cm-unstructured.yaml + - include/cm-init-scripts-r2r.yaml + - include/cm-init-scripts-hatchet.yaml + + - include/r2r-dashboard-indep.yaml + - include/r2r-graph-clustering-indep.yaml + - include/r2r-nginx-indep.yaml + - include/unstructured-indep.yaml + + - include/r2r-initc.yaml + - include/hatchet-dashboard-initc.yaml +# - include/pgvector-sts.yaml +# - include/pgadmin.yaml +# - include/hatchet-init-job.yaml + +helmCharts: + - name: hatchet-ha + #helm repo add hatchet https://hatchet-dev.github.io/hatchet-charts + #helm repo update hatchet + #helm search repo hatchet/hatchet-ha + + repo: https://hatchet-dev.github.io/hatchet-charts + #version: 0.8.0 + version: 0.9.2 + releaseName: hatchet + namespace: ai-system + valuesFile: helm-values_hatchet.yaml + includeCRDs: true + + - name: postgresql + repo: oci://registry-1.docker.io/bitnamicharts + #helm inspect chart oci://registry-1.docker.io/bitnamicharts/postgresql + #skopeo list-tags docker://registry-1.docker.io/bitnamicharts/postgresql + #version: 16.6.3 + version: 16.6.3 + releaseName: postgresql + valuesFile: helm-values_postgresql.yaml + includeCRDs: true + # the Same Namespace + namespace: ai-system + +patches: +- path: patches/service.yaml + target: + kind: Service + +- path: patches/hatchet-rabbitmq-sts.yaml + target: + kind: StatefulSet + name: hatchet-rabbitmq + +# Remove secrets generated by Helm chart +- path: patches/rm-secret-hatchet-rabbitmq-config.yaml + target: + kind: Secret + name: hatchet-rabbitmq-config +- path: patches/rm-secret-hatchet-rabbitmq.yaml + target: + kind: Secret + name: hatchet-rabbitmq +- path: patches/rm-secret-hatchet-shared-config.yaml + target: + kind: Secret + name: hatchet-shared-config + + diff --git 
a/deployment/k8s/kustomizations/patches/hatchet-rabbitmq-sts.yaml b/deployment/k8s/kustomizations/patches/hatchet-rabbitmq-sts.yaml new file mode 100644 index 000000000..508363860 --- /dev/null +++ b/deployment/k8s/kustomizations/patches/hatchet-rabbitmq-sts.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: hatchet-rabbitmq +spec: + volumeClaimTemplates: + - kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: csi-sc + template: + spec: + containers: + - env: + - name: RABBITMQ_USERNAME + value: "" + valueFrom: + secretKeyRef: + key: rabbitmq-user + name: hatchet-rabbitmq + name: rabbitmq + livenessProbe: + exec: + command: + - sh + - -ec + - curl -f --user ${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD} 127.0.0.1:15672/api/health/checks/virtual-hosts + readinessProbe: + exec: + command: + - sh + - -ec + - curl -f --user ${RABBITMQ_USERNAME}:${RABBITMQ_PASSWORD} 127.0.0.1:15672/api/health/checks/local-alarms \ No newline at end of file diff --git a/deployment/k8s/kustomizations/patches/rm-secret-hatchet-postgres.yaml b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-postgres.yaml new file mode 100644 index 000000000..d47164f0b --- /dev/null +++ b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-postgres.yaml @@ -0,0 +1,5 @@ +$patch: delete +apiVersion: v1 +kind: Secret +metadata: + name: hatchet-postgres diff --git a/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq-config.yaml b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq-config.yaml new file mode 100644 index 000000000..2b10c78ba --- /dev/null +++ b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq-config.yaml @@ -0,0 +1,5 @@ +$patch: delete +apiVersion: v1 +kind: Secret +metadata: + name: hatchet-rabbitmq-config diff --git a/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq.yaml 
b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq.yaml new file mode 100644 index 000000000..06c67af67 --- /dev/null +++ b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-rabbitmq.yaml @@ -0,0 +1,5 @@ +$patch: delete +apiVersion: v1 +kind: Secret +metadata: + name: hatchet-rabbitmq diff --git a/deployment/k8s/kustomizations/patches/rm-secret-hatchet-shared-config.yaml b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-shared-config.yaml new file mode 100644 index 000000000..0cf08bafc --- /dev/null +++ b/deployment/k8s/kustomizations/patches/rm-secret-hatchet-shared-config.yaml @@ -0,0 +1,5 @@ +$patch: delete +apiVersion: v1 +kind: Secret +metadata: + name: hatchet-shared-config diff --git a/deployment/k8s/kustomizations/patches/service.yaml b/deployment/k8s/kustomizations/patches/service.yaml new file mode 100644 index 000000000..20824fe0e --- /dev/null +++ b/deployment/k8s/kustomizations/patches/service.yaml @@ -0,0 +1,10 @@ +- op: replace + path: /spec/ipFamilies + value: + - IPv4 + +- op: replace + path: /spec/ipFamilyPolicy + value: + SingleStack +# PreferDualStack \ No newline at end of file diff --git a/deployment/k8s/manifests/examples/externalsecret_hatchet.yaml b/deployment/k8s/manifests/examples/externalsecret_hatchet.yaml new file mode 100644 index 000000000..316de169c --- /dev/null +++ b/deployment/k8s/manifests/examples/externalsecret_hatchet.yaml @@ -0,0 +1,143 @@ +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: hatchet-shared-config + annotations: + argocd.argoproj.io/sync-wave: "-2" +spec: + ## kubectl -n kube-system annotate es vsphere-cpi-creds force-sync=$(date +%s) --overwrite + refreshInterval: "0" + secretStoreRef: + # This name must match the metadata.name in the `SecretStore` + name: bitwarden-secretsmanager + kind: SecretStore + #kind: ClusterSecretStore + target: + name: hatchet-shared-config + # this is how the Kind=Secret will look like + template: + engineVersion: v2 
+ data: + + ADMIN_EMAIL: "{{ .RABBITMQ_ADMIN_EMAIL }}" + ADMIN_PASSWORD: "{{ .RABBITMQ_ADMIN_PASSWORD }}" + DATABASE_POSTGRES_DB_NAME: "hatchet" + DATABASE_POSTGRES_HOST: "hatchet-documentdb" + DATABASE_POSTGRES_PASSWORD: "{{ .HATCHET_DATABASE_POSTGRES_PASSWORD }}" + DATABASE_POSTGRES_PORT: "5432" + DATABASE_POSTGRES_SSL_MODE: "disable" + DATABASE_POSTGRES_USERNAME: "{{ .HATCHET_DATABASE_POSTGRES_USERNAME }}" + DATABASE_URL: "postgres://{{ .HATCHET_DATABASE_POSTGRES_USERNAME }}:{{ .HATCHET_DATABASE_POSTGRES_PASSWORD }}@hatchet-documentdb:5432/hatchet?sslmode=disable" + SERVER_AUTH_BASIC_AUTH_ENABLED: "t" + SERVER_AUTH_COOKIE_DOMAIN: "localhost:8080" + SERVER_AUTH_COOKIE_INSECURE: "t" + SERVER_AUTH_SET_EMAIL_VERIFIED: "t" + SERVER_GRPC_BIND_ADDRESS: "0.0.0.0" + SERVER_GRPC_BROADCAST_ADDRESS: "controllers:7070" + SERVER_GRPC_INSECURE: "true" + SERVER_TASKQUEUE_RABBITMQ_URL: "amqp://{{ .RABBITMQ_DEFAULT_USER }}:{{ .RABBITMQ_DEFAULT_PASS }}@hatchet-rabbitmq:5672/" + SERVER_URL: "http://localhost:8080" + + + data: + - secretKey: RABBITMQ_DEFAULT_PASS + remoteRef: + key: "6203f8e5-d273-0000-0000-aaa000000000" + - secretKey: RABBITMQ_DEFAULT_USER + remoteRef: + key: "330e6465-4568-0000-0000-aaa000000000" + - secretKey: HATCHET_DATABASE_POSTGRES_USERNAME + remoteRef: + key: "261e8389-852e-0000-0000-aaa000000000" + - secretKey: HATCHET_DATABASE_POSTGRES_PASSWORD + remoteRef: + key: "5eb84a48-e16b-0000-0000-aaa000000000" + - secretKey: RABBITMQ_ADMIN_EMAIL + remoteRef: + key: "3da5e88c-1640-0000-0000-aaa000000000" + - secretKey: RABBITMQ_ADMIN_PASSWORD + remoteRef: + key: "98b55ce2-fce8-0000-0000-aaa000000000" +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: hatchet-rabbitmq-config + annotations: + argocd.argoproj.io/sync-wave: "-2" +spec: + ## kubectl -n kube-system annotate es vsphere-cpi-creds force-sync=$(date +%s) --overwrite + refreshInterval: "0" + secretStoreRef: + # This name must match the metadata.name in the `SecretStore` + 
name: bitwarden-secretsmanager + kind: SecretStore + #kind: ClusterSecretStore + target: + name: hatchet-rabbitmq-config + # this is how the Kind=Secret will look like + template: + engineVersion: v2 + data: + rabbitmq.conf: | + ## Username and password + default_user = {{ .RABBITMQ_DEFAULT_USER }} + ## Clustering + ## + cluster_name = hatchet-rabbitmq + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default + cluster_formation.k8s.address_type = hostname + cluster_formation.k8s.service_name = hatchet-rabbitmq-headless + cluster_formation.k8s.hostname_suffix = .hatchet-rabbitmq-headless.ai-system.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + + # queue master locator + queue_master_locator = min-masters + # enable loopback user + loopback_users.hatchet = false + #default_vhost = ai-system-vhost + #disk_free_limit.absolute = 50MB + + data: + - secretKey: RABBITMQ_DEFAULT_USER + remoteRef: + key: "330e6465-4568-48e1-ae07-b27c001f5f08" +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: hatchet-rabbitmq + annotations: + argocd.argoproj.io/sync-wave: "-2" +spec: + ## kubectl -n kube-system annotate es vsphere-cpi-creds force-sync=$(date +%s) --overwrite + refreshInterval: "0" + secretStoreRef: + # This name must match the metadata.name in the `SecretStore` + name: bitwarden-secretsmanager + kind: SecretStore + #kind: ClusterSecretStore + target: + name: hatchet-rabbitmq + # this is how the Kind=Secret will look like + template: + engineVersion: v2 + data: + rabbitmq-erlang-cookie: "{{ .rabbitmq_erlang_cookie }}" + rabbitmq-password: "{{ .RABBITMQ_DEFAULT_PASS }}" + rabbitmq-user: "{{ .RABBITMQ_DEFAULT_USER }}" + + data: + - secretKey: rabbitmq_erlang_cookie + remoteRef: + key: "2aae42a4-8813-0000-0000-aaa000000000" + - secretKey: RABBITMQ_DEFAULT_PASS + 
remoteRef: + key: "6203f8e5-d273-0000-0000-aaa000000000" + - secretKey: RABBITMQ_DEFAULT_USER + remoteRef: + key: "330e6465-4568-0000-0000-aaa000000000" \ No newline at end of file diff --git a/deployment/k8s/manifests/examples/externalsecret_r2r.yaml b/deployment/k8s/manifests/examples/externalsecret_r2r.yaml new file mode 100644 index 000000000..d4c6819d5 --- /dev/null +++ b/deployment/k8s/manifests/examples/externalsecret_r2r.yaml @@ -0,0 +1,374 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: r2r-secrets + annotations: + argocd.argoproj.io/sync-wave: "-2" +spec: + ## kubectl -n kube-system annotate es vsphere-cpi-creds force-sync=$(date +%s) --overwrite + refreshInterval: "0" + secretStoreRef: + # This name must match the metadata.name in the `SecretStore` + name: bitwarden-secretsmanager + kind: SecretStore + #kind: ClusterSecretStore + target: + name: r2r-secrets + # this is how the Kind=Secret will look like + template: + engineVersion: v2 + data: + + R2R_POSTGRES_USER: "{{ .R2R_POSTGRES_USER }}" + R2R_POSTGRES_PASSWORD: "{{ .R2R_POSTGRES_PASSWORD }}" + + OPENAI_API_KEY: "{{ .OPENAI_API_KEY }}" + LITELLM_PROXY_API_KEY: "{{ .OPENAI_API_KEY }}" + R2R_SECRET_KEY: "{{ .R2R_SECRET_KEY }}" + + ANTHROPIC_API_KEY: "" + AZURE_FOUNDRY_API_KEY: "" + AZURE_API_KEY: "" + GOOGLE_APPLICATION_CREDENTIALS: "" + GEMINI_API_KEY: "" + AWS_ACCESS_KEY_ID: "" + AWS_SECRET_ACCESS_KEY: "" + GROQ_API_KEY: "" + COHERE_API_KEY: "" + ANYSCALE_API_KEY: "" + LM_STUDIO_API_KEY: "" + HUGGINGFACE_API_KEY: "{{ .HF_TEI_LOCAL_API_KEY }}" + UNSTRUCTURED_API_KEY: "" + SERPER_API_KEY: "" + SENDGRID_API_KEY: "" + + GOOGLE_CLIENT_ID: "" + GOOGLE_CLIENT_SECRET: "" + GITHUB_CLIENT_ID: "" + GITHUB_CLIENT_SECRET: "" + + data: + - secretKey: R2R_POSTGRES_USER + remoteRef: + key: "2ef5f595-067d-0000-0000-aaa000000000" + - secretKey: R2R_POSTGRES_PASSWORD + remoteRef: + key: "5ddbf1a2-4db4-0000-0000-aaa000000000" + - secretKey: OPENAI_API_KEY + remoteRef: + key: 
"4d6dd102-8ba6-0000-0000-aaa000000000" + - secretKey: HF_TEI_LOCAL_API_KEY + remoteRef: + key: "d1f9c4a9-2ae2-0000-0000-aaa000000000" + - secretKey: R2R_SECRET_KEY + remoteRef: + key: "2d845d61-d204-0000-0000-aaa000000000" + +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: r2r-files + annotations: + argocd.argoproj.io/sync-wave: "-2" +spec: + ## kubectl -n kube-system annotate es vsphere-cpi-creds force-sync=$(date +%s) --overwrite + refreshInterval: "0" + secretStoreRef: + # This name must match the metadata.name in the `SecretStore` + name: bitwarden-secretsmanager + kind: SecretStore + #kind: ClusterSecretStore + target: + name: r2r-files + # this is how the Kind=Secret will look like + template: + engineVersion: v2 + data: + r2r.toml: | + [app] + # app settings are global available like `r2r_config.agent.app` + # project_name = "r2r_default" # optional, can also set with `R2R_PROJECT_NAME` env var + default_max_documents_per_user = 1_000 + default_max_chunks_per_user = 1_000_000 + default_max_collections_per_user = 100 + + # Set the default max upload size to 200 GB for local testing + default_max_upload_size = 214748364800 + + # LLM used for internal operations, like deriving conversation names + fast_llm = "openai/openai-cloudflareaig/gpt-4o-mini" + + # LLM used for user-facing output, like RAG replies + quality_llm = "openai/openai-cloudflareaig/gpt-4o" + + # LLM used for ingesting visual inputs + vlm = "openai/openai-cloudflareaig/gpt-4o" + + # LLM used for transcription + audio_lm = "openai/openai-cloudflareaig/whisper-1" + + + [agent] + #system_instruction_name = "rag_agent" # The "system" message or prompt name + agent_static_prompt = "static_rag_agent" + agent_dynamic_prompt = "dynamic_rag_agent" + # tools = ["local_search", "content", "web_search"] # uncomment to enable web search + tools = ["local_search", "content"] # Tools accessible to the agent + + [agent.generation_config] + #model = 
"openai/openai-cloudflareaig/gpt-4o" + model = "openai/openai-cloudflareaig/gpt-4o-mini" + #temperature = 0.7 + #top_p = 0.9 + #max_tokens_to_sample = 1_024 + #stream = false + #functions = [] + #tools = [] + #api_base = "" + #add_generation_kwargs = {} + + + [auth] + provider = "r2r" # Supported values: "r2r", "supabase" + access_token_lifetime_in_minutes = 60000 # Lifetime of access token in minutes + refresh_token_lifetime_in_days = 7 # Lifetime of refresh token in days + require_authentication = false # If true, all requests must provide valid auth + require_email_verification = false # If true, newly created users must verify email + default_admin_email = "{{ .default_admin_email }}" + default_admin_password = "{{ .default_admin_password }}" + + #[auth.extra_fields] + #supabase_url = "https://your-supabase-url.com" # Required if provider="supabase" + #supabase_key = "{{ .supabase_key }}" # Required if provider="supabase" + + + [completion] + provider = "r2r" # litellm + concurrent_request_limit = 64 # Global concurrency limit for completion requests + + [completion.generation_config] + #model = "openai/openai-cloudflareaig/gpt-4o" + model = "openai/openai-cloudflareaig/gpt-4o-mini" + temperature = 0.1 + top_p = 1 + max_tokens_to_sample = 1_024 # 4_096 + stream = false + #functions = [] # If provider supports function calling + #tools = [] # If provider supports tool usage + #api_base = "" # Custom base URL if needed + add_generation_kwargs = { } # Catch-all for extra generation params (e.g., "stop" tokens, etc.) 
+ #response_format.type = "json_object" # Ebable strict structured JSON-mode response format: "json_object" or leave blank + + [crypto] + provider = "bcrypt" # "bcrypt" or "nacl" + # "bcrypt": uses BcryptCryptoProvider (crypto/bcrypt.py) + # "nacl": uses NaClCryptoProvider (crypto/nacl.py) + + #secret_key = "" # Master key for JWT token signing + # Default fallback from env: R2R_SECRET_KEY + # If not set, code may use a built-in default (NOT RECOMMENDED for production) + + + [database] + provider = "postgres" # "postgres", "mysql", "sqlite", or custom + default_collection_name = "Default" + default_collection_description = "Your default collection." + enable_fts = true # whether or not to enable full-text search, e.g `hybrid search` + # collection_summary_system_prompt = 'default_system' + # collection_summary_task_prompt = 'default_collection_summary' + + # KG settings + batch_size = 256 # Some ingestion/DB ops batch size (especially for large data) + + [database.graph_creation_settings] # Configuration for the model used in knowledge graph creation. 
+ clustering_mode = "local" # "remote" or "local" + graph_entity_description_prompt = "graph_entity_description" + graph_extraction_prompt = "graph_extraction" + entity_types = [] # if empty, all entities are extracted + relation_types = [] # if empty, all relations are extracted + automatic_deduplication = true # enable automatic deduplication of entities + fragment_merge_count = 4 # number of fragments to merge into a single extraction + max_knowledge_relationships = 100 + max_knowledge_triples = 100 # max number of triples to extract for each document chunk + max_description_input_length = 49_152 + #generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } # and other params, model used for relationshipt extraction + #concurrent_request_limit = 2 + + [database.graph_entity_deduplication_settings] + graph_entity_deduplication_type = "by_name" # "by_name", "by_id" + graph_entity_deduplication_prompt = "graphrag_entity_deduplication" + max_description_input_length = 49_152 # increase if you want more comprehensive descriptions + #generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } # and other params, model used for deduplication + #concurrent_request_limit = 2 + + [database.graph_enrichment_settings] + graph_communities_prompt = "graph_communities" + max_summary_input_length = 49_152 + #generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } # and other params, model used for node description and graph clustering + leiden_params = {} # Parameters for the Leiden algorithm. + #concurrent_request_limit = 2 + + [database.graph_search_settings] #What is this used for? Should be configuration for the model used in knowledge graph search operations. 
+ enabled = true + #generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + generation_config = { model = "openai/ollama-openai/sparse-llama3.1:8b-2of4-bf16" } + + [database.limits] + # Default fallback limits if no route or user-level overrides are found + global_per_min = 30_000 + monthly_limit = 100_000 + + [database.route_limits] + # Set the `v3/retrieval/search` route to have a maximum of 5 requests per minute + "/v3/retrieval/search" = { route_per_min = 120, monthly_limit = 1_000_000 } + "/v3/retrieval/rag" = { route_per_min = 30 } + + [database.user_limits."47e53676-b478-5b3f-a409-234ca2164de5"] + global_per_min = 2 + route_per_min = 1 + + + [embedding] + provider = "litellm" + concurrent_request_limit = 32 # Embedding concurrency limit + + # For basic applications, use `openai/text-embedding-3-small` with `base_dimension = 512` + + # RECOMMENDED - For advanced applications, + # use `openai/text-embedding-3-large` with `base_dimension = 3072` and binary quantization + #base_model = "openai/openai-cloudflareaig/text-embedding-3-small" + #base_dimension = 512 + #base_model = "openai/infinity/bge-en-icl" + base_model = "openai/nebius/bge-en-icl" + base_dimension = 4_096 + #api_base = "https://litellm.mywebsite.com/v1" # Optional, can be set via LITELLM_PROXY_API_BASE + #api_key = "{{ .LITELLM_PROXY_API_KEY }}" + + rerank_model = "huggingface/BAAI/bge-reranker-v2-m3" # Optional re-rank model + #rerank_url = "https://hf-tei.mywebsite.com" # Optional URL for re-rank, can be set via HUGGINGFACE_API_BASE + + batch_size = 32 # Number of texts processed per request + add_title_as_prefix = false # If true, prepend the doc title to text + concurrent_request_limit = 64 + quantization_settings = { quantization_type = "FP32" } + + [embedding.chunk_enrichment_settings] + generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + + + [completion_embedding] + # Generally this should be the same as the embedding config, but advanced users 
may want to run with a different provider to reduce latency + provider = "litellm" + base_model = "openai/nebius/bge-en-icl" + base_dimension = 512 + batch_size = 128 + add_title_as_prefix = false + concurrent_request_limit = 256 + + + [file] + provider = "postgres" # "postgres", "local", "s3", etc. if implemented + + + [ingestion] + provider = "r2r" + strategy = "auto" # Could be "auto", "by_title", "recursive", etc. + provider = "unstructured_local" # "r2r", "unstructured_local", "unstructured_api" + # r2r chunking_strategy: recursive only + # unstructured_local chunking_strategy: by_title or character + chunking_strategy = "by_title" # "recursive", "by_title", "character", etc. depending on the provider + chunk_size = 1_024 + chunk_overlap = 512 + excluded_parsers = ["mp4"] # Example of skipping certain file types + + automatic_extraction = true # enable automatic extraction of entities and relations + new_after_n_chars = 2_048 + max_characters = 4_096 + combine_under_n_chars = 1_024 + overlap = 1_024 + ingestion_mode = "hi-res" # "hi-res" or "lo-res" for ingestion mode + + #- `hi-res`: Thorough ingestion with full summaries and enrichment. + #- `fast`: Quick ingestion with minimal enrichment and no summaries. + #- `custom`: Full control via `ingestion_config`. + #If `filters` or `limit` (in `ingestion_config`) are provided alongside `hi-res` or `fast`, + #they will override the default settings for that mode. 
+ # Ingestion-time document summary parameters + skip_document_summary = false + # document_summary_system_prompt = 'default_system' + # document_summary_task_prompt = 'default_summary' + # chunks_for_document_summary = 128 + document_summary_model = "openai/openai-cloudflareaig/gpt-4o-mini" # Summaries for each doc chunk + + audio_transcription_model = "openai/whisper-1" # If ingesting audio + #vision_img_model = "openai/openai-cloudflareaig/gpt-4o" + vision_img_model = "openai/ollama-openai/llama3.2-vision:90b-instruct-q4_k_m" # If vision-based models supported + #vision_pdf_model = "openai/openai-cloudflareaig/gpt-4o" + vision_pdf_model = "openai/ollama-openai/llama3.2-vision:90b-instruct-q4_k_m" + + [ingestion.chunk_enrichment_settings] + chunk_enrichment_prompt = "chunk_enrichment" + enable_chunk_enrichment = false # disabled by default + n_chunks = 2 # the number of chunks (both preceding and succeeding) to use in enrichment + strategies = ["semantic", "neighborhood"] + forward_chunks = 3 + backward_chunks = 3 + semantic_neighbors = 10 + semantic_similarity_threshold = 0.7 + generation_config = { model = "openai/openai-cloudflareaig/gpt-4o-mini" } + + [ingestion.extra_parsers] + pdf = "zerox" # "zerox" parser override for PDFs (extended functionality) + + + [logging] + level = "DEBUG" # One of: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" + provider = "r2r" + log_table = "logs" + log_info_table = "log_info" + # file = "app.log" # Log output file path + + + [orchestration] + provider = "hatchet" # "hatchet" or "simple" + kg_creation_concurrency_limit = 32 # used if "hatchet" orchestrator + ingestion_concurrency_limit = 16 # used if "hatchet" orchestrator + kg_concurrency_limit = 8 # used if "hatchet" orchestrator + + + [prompt] + provider = "r2r" + + + [email] + provider = "console_mock" # "smtp", "sendgrid", or "console_mock" + # + # - "smtp": uses AsyncSMTPEmailProvider (email/smtp.py) + # - "sendgrid": uses SendGridEmailProvider (email/sendgrid.py) + #
- "console_mock": uses ConsoleMockEmailProvider (email/console_mock.py) + + # Console Mock settings (provider="console_mock") + [email.console_mock] + logs = true # If true, logs emails to console for testing + + data: + - secretKey: default_admin_email + remoteRef: + key: "1330136d-c49b-0000-0000-aaa000000000" + - secretKey: default_admin_password + remoteRef: + key: "059ba37f-a172-0000-0000-aaa000000000" + - secretKey: supabase_key + remoteRef: + key: "84c50cae-56a8-0000-0000-aaa000000000" + - secretKey: R2R_SECRET_KEY + remoteRef: + key: "2d845d61-d204-0000-0000-aaa000000000" + - secretKey: LITELLM_PROXY_API_KEY + remoteRef: + key: "4d6dd102-8ba6-0000-0000-aaa000000000" +--- \ No newline at end of file diff --git a/deployment/k8s/manifests/examples/ingress-r2r.yaml b/deployment/k8s/manifests/examples/ingress-r2r.yaml new file mode 100644 index 000000000..af31694bd --- /dev/null +++ b/deployment/k8s/manifests/examples/ingress-r2r.yaml @@ -0,0 +1,56 @@ +# Dependancy https://external-dns.io +# To add a DNS record for wren-ui.myhost.net host +# Note: without authentication, enyone can acess your app, see your data and modify your settings! 
+apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: r2r.mywebsite.com-tls + annotations: + ### Dependency external-dns + external-dns.alpha.kubernetes.io/filter: 'include' + external-dns.alpha.kubernetes.io/cloudflare-proxied: 'true' + external-dns.alpha.kubernetes.io/provider-cloudflare: 'true' + external-dns.alpha.kubernetes.io/target: so-ingress.mywebsite.com + #external-dns.alpha.kubernetes.io/target: so-ingress.mywebsite.com + + ### Dependency nginx-ingress-controller + nginx.ingress.kubernetes.io/disable-lua: 'true' + nginx.ingress.kubernetes.io/enable-lua: 'false' + nginx.ingress.kubernetes.io/enable-vts-status: 'false' + nginx.ingress.kubernetes.io/enable-modsecurity: 'false' + nginx.ingress.kubernetes.io/modsecurity-snippet: | + SecRuleEngine Off + nginx.ingress.kubernetes.io/enable-owasp-modsecurity-crs: 'false' + nginx.ingress.kubernetes.io/proxy-connect-timeout: '360' + nginx.ingress.kubernetes.io/proxy-read-timeout: '360' + nginx.ingress.kubernetes.io/proxy-send-timeout: '360' + +spec: + # Instead you may use other ingressClassName such as AWS alb.
If other than nginx ingress is used, don't forget to comment unsupported annotations above + #"nginx" or "alb" + ingressClassName: nginx + rules: + - host: r2r.mywebsite.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + #fix the service name to match your service name + name: r2r-dashboard + port: + number: 3000 + - path: /hatchet + pathType: Prefix + backend: + service: + #fix the service name to match your service name + name: hatchet-dashboard + port: + number: 80 +### Comment TLS section if you are not going to use https + tls: + - hosts: + - r2r.mywebsite.com + secretName: r2r.mywebsite.com-tls diff --git a/deployment/k8s/manifests/examples/secrets_hatchet.yaml b/deployment/k8s/manifests/examples/secrets_hatchet.yaml new file mode 100644 index 000000000..8833753f7 --- /dev/null +++ b/deployment/k8s/manifests/examples/secrets_hatchet.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +data: + ADMIN_EMAIL: ++++++++ + ADMIN_PASSWORD: ++++++++ + DATABASE_POSTGRES_DB_NAME: ++++++++ + DATABASE_POSTGRES_HOST: ++++++++ + DATABASE_POSTGRES_PASSWORD: ++++++++ + DATABASE_POSTGRES_PORT: ++++++++ + DATABASE_POSTGRES_SSL_MODE: ++++++++ + DATABASE_POSTGRES_USERNAME: ++++++++ + DATABASE_URL: ++++++++ + SERVER_AUTH_BASIC_AUTH_ENABLED: ++++++++ + SERVER_AUTH_COOKIE_DOMAIN: ++++++++ + SERVER_AUTH_COOKIE_INSECURE: ++++++++ + SERVER_AUTH_SET_EMAIL_VERIFIED: ++++++++ + SERVER_GRPC_BIND_ADDRESS: ++++++++ + SERVER_GRPC_BROADCAST_ADDRESS: ++++++++ + SERVER_GRPC_INSECURE: ++++++++ + SERVER_TASKQUEUE_RABBITMQ_URL: ++++++++ + SERVER_URL: ++++++++ +kind: Secret +metadata: + name: hatchet-shared-config + namespace: ai-system +type: Opaque + +--- +apiVersion: v1 +data: + rabbitmq.conf: ++++++++ +kind: Secret +metadata: + name: hatchet-rabbitmq-config + namespace: ai-system +type: Opaque +--- +apiVersion: v1 +data: + rabbitmq-erlang-cookie: ++++++++ + rabbitmq-password: ++++++++ + rabbitmq-user: ++++++++ +kind: Secret +metadata: + name: hatchet-rabbitmq + namespace: 
ai-system +type: Opaque \ No newline at end of file diff --git a/deployment/k8s/manifests/examples/secrets_r2r.yaml b/deployment/k8s/manifests/examples/secrets_r2r.yaml new file mode 100644 index 000000000..21601dd5d --- /dev/null +++ b/deployment/k8s/manifests/examples/secrets_r2r.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: v1 +data: + ANTHROPIC_API_KEY: ++++++++ + ANYSCALE_API_KEY: ++++++++ + AWS_ACCESS_KEY_ID: ++++++++ + AWS_SECRET_ACCESS_KEY: ++++++++ + AZURE_API_KEY: ++++++++ + AZURE_FOUNDRY_API_KEY: ++++++++ + COHERE_API_KEY: ++++++++ + GEMINI_API_KEY: ++++++++ + GITHUB_CLIENT_ID: ++++++++ + GITHUB_CLIENT_SECRET: ++++++++ + GOOGLE_APPLICATION_CREDENTIALS: ++++++++ + GOOGLE_CLIENT_ID: ++++++++ + GOOGLE_CLIENT_SECRET: ++++++++ + GROQ_API_KEY: ++++++++ + HUGGINGFACE_API_KEY: ++++++++ + LITELLM_PROXY_API_KEY: ++++++++ + LM_STUDIO_API_KEY: ++++++++ + OPENAI_API_KEY: ++++++++ + R2R_POSTGRES_PASSWORD: ++++++++ + R2R_POSTGRES_USER: ++++++++ + R2R_SECRET_KEY: ++++++++ + SENDGRID_API_KEY: ++++++++ + SERPER_API_KEY: ++++++++ + UNSTRUCTURED_API_KEY: ++++++++ +kind: Secret +metadata: + name: r2r-secrets + namespace: ai-system +type: Opaque +--- +apiVersion: v1 +data: + r2r.toml: ++++++++ +kind: Secret +metadata: + name: r2r-files + namespace: ai-system +type: Opaque \ No newline at end of file