#!/usr/bin/env bash
# Stand up the Cognee/Ladybug ingestion pipeline (MinIO + RabbitMQ + dispatcher)
# and optionally push a sample file through the MinIO bucket to prove the
# RabbitMQ → dispatcher → Cognee path is healthy.

set -euo pipefail

# Resolve the repository root (parent of this script's directory) and run from
# it so the relative docker-compose/env paths below always line up.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"

# Defaults; PROJECT_ID may be overridden via env or the --project flag.
PROJECT_ID="${PROJECT_ID:-demo123}"
VERIFY=0
# Print CLI usage (flags and environment overrides) to stdout.
usage() {
  cat <<'USAGE'
Usage: scripts/setup_auto_ingest.sh [--verify] [--project <id>]

  --verify         Upload a sample file into MinIO after services start.
  --project <id>   Project ID for the verification upload (default: demo123).

Environment overrides:
  PROJECT_ID    Same as --project.
  AWS_ENDPOINT  Override the MinIO endpoint (default http://minio:9000 inside Docker network).
USAGE
}
| 26 | + |
# Parse command-line flags; unknown flags print usage and fail.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --verify)
      VERIFY=1
      shift
      ;;
    --project)
      # ${2:?…} aborts with a clear message when the value is missing,
      # instead of the opaque "unbound variable" error from set -u.
      PROJECT_ID="${2:?--project requires a value}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage
      exit 1
      ;;
  esac
done
| 48 | + |
# MinIO endpoint as seen from inside the Docker network (overridable via env).
AWS_ENDPOINT="${AWS_ENDPOINT:-http://minio:9000}"
# require_file <path> — abort with setup guidance if <path> is not a regular file.
require_file() {
  if [[ ! -f "$1" ]]; then
    echo "Missing $1. Copy volumes/env/.env.template to volumes/env/.env first." >&2
    exit 1
  fi
}
| 57 | + |
# run <cmd…> — echo a timestamped trace of the command, then execute it,
# propagating the command's exit status.
run() {
  echo "[$(date +%H:%M:%S)] $*"
  "$@"
}
| 62 | + |
require_file "volumes/env/.env"

echo "Bootstrapping auto-ingestion stack from $ROOT_DIR"
run docker compose up -d
run docker compose -f docker/docker-compose.cognee.yml up -d

# Ensure MinIO buckets, lifecycle policies, and AMQP events are in place.
run docker compose up minio-setup

# Make sure the dispatcher is online (restarts to pick up env/file changes).
run docker compose up -d ingestion-dispatcher

echo "Current ingestion dispatcher status:"
docker compose ps ingestion-dispatcher

if [[ "$VERIFY" -eq 1 ]]; then
  TMP_FILE="$(mktemp)"
  # Remove the temp file on any exit path — set -e can abort mid-verify
  # (e.g. if the aws-cli upload fails) and would otherwise leak it.
  trap 'rm -f "$TMP_FILE"' EXIT
  SAMPLE_KEY="files/ingest_smoketest_$(date +%s).txt"
  cat <<EOF >"$TMP_FILE"
Automatic ingestion smoke test at $(date)
Project: $PROJECT_ID
EOF

  echo "Uploading $SAMPLE_KEY into s3://projects/$PROJECT_ID via aws-cli container..."
  # NOTE: dev-only MinIO credentials for the local stack; never reuse in prod.
  run docker run --rm --network fuzzforge_temporal_network \
    -e AWS_ACCESS_KEY_ID=fuzzforge \
    -e AWS_SECRET_ACCESS_KEY=fuzzforge123 \
    -e AWS_DEFAULT_REGION=us-east-1 \
    -v "$TMP_FILE:/tmp/sample.txt:ro" \
    amazon/aws-cli s3 cp /tmp/sample.txt "s3://projects/${PROJECT_ID}/${SAMPLE_KEY}" \
    --endpoint-url "$AWS_ENDPOINT"

  rm -f "$TMP_FILE"
  cat <<EOF

Sample file enqueued. Watch the dispatcher logs with:
  docker logs -f fuzzforge-ingestion-dispatcher

Datasets will appear via:
  curl -s -X POST http://localhost:18000/api/v1/auth/login \\
    -d "username=project_${PROJECT_ID}@fuzzforge.dev&password=\$(python3 - <<'PY'
from hashlib import sha256
print(sha256(b"$PROJECT_ID").hexdigest()[:20])
PY
)" | python3 -c 'import sys,json; print(json.load(sys.stdin)["access_token"])'
EOF
fi

echo "Auto-ingestion stack ready."