fix(tests): make public api test transparent and add retry #586
name: Monke – parallel runners

on:
  workflow_dispatch:
  pull_request:
    paths:
      - "monke/**"
      - ".github/workflows/**"
      - "docker/**"
      - "backend/**"
      - "frontend/**"
      - "start.sh"

concurrency:
  group: monke-${{ github.ref }}
  cancel-in-progress: true
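# For pull_request events github.ref is refs/pull/<number>/merge, so the concurrency
# group for this PR would resolve to something like monke-refs/pull/586/merge.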

jobs:
  # Job 1: Build images once and determine connectors
  build-and-determine:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write # For pushing to GHCR
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      connector-count: ${{ steps.set-matrix.outputs.connector-count }}
      image-tag: ${{ steps.set-tag.outputs.tag }}
    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0 # Need full history for git diff

      # Set unique image tag for this run
      - name: Set image tag
        id: set-tag
        run: |
          TAG="monke-${{ github.run_id }}-${{ github.run_attempt }}"
          echo "tag=$TAG" >> $GITHUB_OUTPUT
          echo "Using image tag: $TAG"

      # Setup Docker Buildx for efficient builds
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Login to GitHub Container Registry
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Build and push backend image (ONCE for all parallel jobs)
      - name: Build and push backend image
        uses: docker/build-push-action@v5
        with:
          context: ./backend
          push: true
          tags: ghcr.io/${{ github.repository }}/test-backend:${{ steps.set-tag.outputs.tag }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
      # Note: Frontend image not needed for connector tests (backend-only)
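      # The pushed reference therefore has the form
      # ghcr.io/<owner>/<repo>/test-backend:monke-<run_id>-<run_attempt> (placeholders);
      # this is exactly what the test-connectors job below pulls as BACKEND_IMAGE.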

      # Determine which connectors to test
      - name: Determine connectors to test
        id: set-matrix
        env:
          BASE_BRANCH: ${{ github.base_ref || 'main' }}
        run: |
          # Use the monke.sh CLI to determine connectors (keeps the logic DRY).
          # It returns a space-separated list: core + changed, with a minimum of 4 connectors.
          chmod +x ./monke.sh
          echo "Determining connectors using monke.sh..."
          CONNECTORS=$(./monke.sh --print-connectors --changed)
          echo "Connectors to test: $CONNECTORS"

          # Convert the space-separated list to a JSON array for the matrix
          MATRIX_JSON="["
          FIRST=true
          for connector in $CONNECTORS; do
            if [[ "$FIRST" == true ]]; then
              FIRST=false
            else
              MATRIX_JSON+=","
            fi
            MATRIX_JSON+="\"$connector\""
          done
          MATRIX_JSON+="]"

          # Count connectors
          CONNECTOR_COUNT=$(echo $CONNECTORS | wc -w)

          echo "Final matrix: $MATRIX_JSON"
          echo "Connector count: $CONNECTOR_COUNT"
          echo "matrix=$MATRIX_JSON" >> $GITHUB_OUTPUT
          echo "connector-count=$CONNECTOR_COUNT" >> $GITHUB_OUTPUT

  # Job 2: Run tests in parallel (reuses pre-built backend image)
  test-connectors:
    needs: build-and-determine
    runs-on: ubuntu-latest
    environment: dev
    name: test-${{ matrix.connector }}
    strategy:
      fail-fast: false
      matrix:
        connector: ${{ fromJson(needs.build-and-determine.outputs.matrix) }}
    # Add Azure permissions for OIDC authentication + packages read for pulling images
    permissions:
      id-token: write
      contents: read
      pull-requests: read
      packages: read # For pulling from GHCR
    env:
      # Use pre-built backend image from GHCR (frontend not needed for connector tests)
      BACKEND_IMAGE: ghcr.io/${{ github.repository }}/test-backend:${{ needs.build-and-determine.outputs.image-tag }}
      # Core configuration
      AIRWEAVE_API_URL: http://localhost:8001
      # AI provider keys for backend runtime
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
      MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
      COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
      GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
      CEREBRAS_API_KEY: ${{ secrets.CEREBRAS_API_KEY }}
    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0 # Fetch all history for change detection

      # Login to GitHub Container Registry to pull images
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Pull pre-built backend image from Job 1
      - name: Pull pre-built backend image
        run: |
          echo "Pulling pre-built backend image..."
          docker pull ${{ env.BACKEND_IMAGE }}
          # Tag as local image for docker-compose compatibility
          docker tag ${{ env.BACKEND_IMAGE }} test-backend:latest
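      # Optional sanity check of the retag (sketch, not required by the workflow):
      #   docker image inspect test-backend:latest --format '{{.Id}}'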

      # Pre-pull infrastructure images in parallel.
      # Note: Each matrix runner is isolated (separate VM), so we must pull on each.
      # GitHub's infrastructure caches Docker Hub images, so this is relatively fast.
      # text2vec-transformers is disabled via docker-compose profile (backend uses OpenAI).
      - name: Pre-pull infrastructure images
        run: |
          echo "Pre-pulling infrastructure images in parallel..."
          docker pull postgres:16 &
          docker pull redis:7-alpine &
          docker pull qdrant/qdrant:latest &
          docker pull temporalio/auto-setup:1.24.2 &
          docker pull temporalio/ui:2.26.2 &
          wait
          echo "All infrastructure images pre-pulled successfully"

      # Python deps (cached)
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
          cache: "pip"
          cache-dependency-path: monke/requirements.txt

      - name: Install Python deps
        run: |
          pip install -r monke/requirements.txt
          # Install Azure dependencies if Key Vault is configured
          if [[ -n "$AZURE_KEY_VAULT_URL" ]]; then
            pip install azure-keyvault-secrets azure-identity
          fi

      # Setup environment and start services
      - name: Setup environment and start services
        env:
          # Required for start.sh to skip interactive prompts
          NONINTERACTIVE: "1"
          # Skip services not needed for connector tests
          SKIP_LOCAL_EMBEDDINGS: "1" # Use OpenAI embeddings (saves ~2GB)
          SKIP_FRONTEND: "1" # Backend-only testing (saves time)
          # Set required environment variables for the containers
          AUTH0_ENABLED: "false"
          DEV_MODE: "true"
        run: |
          # Create .env from example
          cp .env.example .env

          # Add AI keys to .env before starting services
          echo "OPENAI_API_KEY=$OPENAI_API_KEY" >> .env
          echo "MISTRAL_API_KEY=$MISTRAL_API_KEY" >> .env
          echo "COHERE_API_KEY=$COHERE_API_KEY" >> .env
          echo "GROQ_API_KEY=$GROQ_API_KEY" >> .env
          echo "CEREBRAS_API_KEY=$CEREBRAS_API_KEY" >> .env
          [[ -n "${AZURE_KEY_VAULT_URL:-}" ]] && echo "AZURE_KEY_VAULT_URL=$AZURE_KEY_VAULT_URL" >> .env || true

          echo "Starting services using start.sh..."
          ./start.sh --noninteractive

          # The script already does health checks, but let's verify
          echo ""
          echo "Verifying we're using the correct test images:"
          docker ps --format "table {{.Names}}\t{{.Image}}" | grep airweave
          echo ""
          echo "Final verification of services:"
          docker ps

          # Backend runs on port 8001 according to start.sh
          echo "Testing backend on port 8001..."
          curl -f http://localhost:8001/health || (echo "Backend not healthy"; docker logs airweave-backend; exit 1)
          echo "Backend is healthy and ready for connector tests!"

      # Run test for this specific connector
      - name: Run acceptance test for ${{ matrix.connector }}
        env:
          CI: true # Tells runner to use simple output
          MONKE_NO_VENV: 1 # Skip venv setup in CI
          MONKE_MAX_PARALLEL: 5
          AIRWEAVE_API_URL: http://localhost:8001
          # Core dependencies (keep these as GitHub secrets)
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          CEREBRAS_API_KEY: ${{ secrets.CEREBRAS_API_KEY }}
          # Monke test authentication
          MONKE_COMPOSIO_API_KEY: ${{ secrets.MONKE_COMPOSIO_API_KEY }}
          MONKE_COMPOSIO_PROVIDER_ID: ${{ secrets.MONKE_COMPOSIO_PROVIDER_ID }}
          # Bitbucket direct auth credentials
          MONKE_BITBUCKET_USERNAME: ${{ secrets.MONKE_BITBUCKET_USERNAME }}
          MONKE_BITBUCKET_API_TOKEN: ${{ secrets.MONKE_BITBUCKET_API_TOKEN }}
          MONKE_BITBUCKET_WORKSPACE: ${{ secrets.MONKE_BITBUCKET_WORKSPACE }}
          MONKE_BITBUCKET_REPO_SLUG: ${{ secrets.MONKE_BITBUCKET_REPO_SLUG }}
          # All SaaS app credentials (Composio configs, API tokens, etc.)
          # will be fetched from Azure Key Vault at runtime
        run: |
          # Make script executable
          chmod +x ./monke.sh

          # Create .env file first
          echo "Creating .env file from .env.example..."
          cp .env.example .env
          cp .env.example monke/.env

          # Run single connector test
          echo "Running test for connector: ${{ matrix.connector }}"
          ./monke.sh ${{ matrix.connector }}
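      # Local reproduction (sketch; assumes the stack from ./start.sh is running and
      # monke/.env holds the same credentials): ./monke.sh <connector-name>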

      # Cleanup Docker containers (like public API tests)
      - name: Cleanup Docker containers
        if: always()
        run: |
          echo "Cleaning up Docker containers..."
          docker compose down -v || true
          docker system prune -f || true

      - name: Upload logs
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: monke-logs-${{ matrix.connector }}
          path: monke/logs/**