---
name: Cypress e2e Test PR Pipeline
# This workflow currently executes on the ODH (Open Data Hub) dash-e2e cluster.
# The test-variables.yml downloaded from GitLab contains RHOAI namespaces by default,
# which are overridden with ODH namespaces from the ODH_NAMESPACES GitHub secret.
#
# This workflow is triggered after test.yml completes successfully.
# It uses concurrency control to ensure only the latest E2E run executes for each PR/branch.
# Labels (e2e-Pass/e2e-Fail) are applied to PRs based on test results.
on:
  # Trigger after test.yml completes successfully
  workflow_run:
    workflows: ["Test"]
    types: [completed]
  # Allow manual trigger for testing/debugging
  workflow_dispatch:

# Concurrency control: Only run latest E2E tests for each PR/branch
# PRs get unique groups (e2e-pr-{number}), branches get branch-based groups (e2e-branch-{name})
concurrency:
  group: ${{ github.event.workflow_run && (github.event.workflow_run.pull_requests[0].number && format('e2e-pr-{0}', github.event.workflow_run.pull_requests[0].number) || format('e2e-branch-{0}', github.event.workflow_run.head_branch || github.event.workflow_run.head_sha)) || format('e2e-manual-{0}', github.run_id) }}
  cancel-in-progress: true

permissions:
  contents: read
  pull-requests: write

env:
  NODE_VERSION: 22.x
  DO_NOT_TRACK: 1
jobs:
  get-test-tags:
    runs-on: self-hosted
    # Only run if test.yml succeeded (for workflow_run) or if manually triggered
    if: github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success'
    outputs:
      test-tags: ${{ steps.set-tags.outputs.test-tags }}
    steps:
      - name: Set default test tags
        id: set-tags
        # Test sets:
        # @ci-dashboard-set-1: Pipelines (Import and Run), Workbenches (create with storage), Project Creation, User Login
        # @ci-dashboard-set-2: Model Stop/Start, Cluster Settings, Connection Creation, Cluster Storage Creation
        # TODO: Use turbo to run tests according to PR changes.
        run: |
          echo "test-tags=[\"@ci-dashboard-set-1\",\"@ci-dashboard-set-2\"]" >> $GITHUB_OUTPUT
e2e-tests:
needs: [get-test-tags]
runs-on: self-hosted
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
test-tag: ${{ fromJson(needs.get-test-tags.outputs.test-tags) }}
steps:
- name: Calculate unique port for this workflow run
run: |
# Dynamic port allocation for parallel execution
BASE_PORT=$((4000 + (${{ github.run_id }} % 1000) * 5))
# Add matrix offset to separate concurrent jobs within same PR
if [[ "${{ matrix.test-tag }}" == *"set-1"* ]]; then
MATRIX_OFFSET=0
elif [[ "${{ matrix.test-tag }}" == *"set-2"* ]]; then
MATRIX_OFFSET=1
else
MATRIX_OFFSET=2
fi
WEBPACK_PORT=$((BASE_PORT + MATRIX_OFFSET))
# Store port info with run_id for cleanup tracking
PORT_INFO_DIR="/tmp/gha-ports"
mkdir -p "$PORT_INFO_DIR"
echo "${{ github.run_id }}" > "$PORT_INFO_DIR/port-${WEBPACK_PORT}.run_id"
echo "WEBPACK_PORT=$WEBPACK_PORT" >> $GITHUB_ENV
echo "PORT_INFO_FILE=$PORT_INFO_DIR/port-${WEBPACK_PORT}.run_id" >> $GITHUB_ENV
echo "📍 Using port ${WEBPACK_PORT} for ${{ matrix.test-tag }} (run_id: ${{ github.run_id }})"
- name: Checkout code
uses: actions/checkout@v4
with:
# For workflow_run events from PRs (including forks), checkout using PR ref format
# This works for both same-repo PRs and fork PRs (refs/pull/{number}/head)
# For branch pushes, use head_sha or head_branch
# For manual triggers, use the default (current commit)
ref: ${{ github.event.workflow_run && (github.event.workflow_run.pull_requests[0].number && format('refs/pull/{0}/head', github.event.workflow_run.pull_requests[0].number) || github.event.workflow_run.head_sha || github.event.workflow_run.head_branch) || github.ref }}
- name: Restore npm dependencies cache
uses: actions/cache/restore@v4
id: npm-cache
with:
path: |
~/.cache/Cypress
**/node_modules
key: ${{ runner.os }}-${{ env.NODE_VERSION }}-all-modules-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-${{ env.NODE_VERSION }}-all-modules-
- name: Setup Node.js ${{ env.NODE_VERSION }}
if: steps.npm-cache.outputs.cache-hit != 'true'
uses: actions/setup-node@v4.3.0
with:
node-version: ${{ env.NODE_VERSION }}
- name: Install dependencies
if: steps.npm-cache.outputs.cache-hit != 'true'
run: npm ci
- name: Restore turbo build artifacts cache
uses: actions/cache/restore@v4
with:
path: ${{ github.workspace }}/.turbo
key: ${{ runner.os }}-${{ env.NODE_VERSION }}-turbo-${{ github.sha }}-e2e
restore-keys: |
${{ runner.os }}-${{ env.NODE_VERSION }}-turbo-
- name: Restore OpenShift CLI tarball cache
uses: actions/cache/restore@v4
id: oc-cache
with:
path: ${{ runner.temp }}/oc.tar.gz
key: ${{ runner.os }}-oc-tarball-${{ env.OC_VERSION || '4.15.0' }}
- name: Download test configuration
run: |
echo "🔧 Downloading test configuration from GitLab..."
if [ -n "${{ secrets.GITLAB_TOKEN }}" ] && [ -n "${{ secrets.GITLAB_TEST_VARS_URL }}" ]; then
if curl -f -k -H "Authorization: Bearer ${{ secrets.GITLAB_TOKEN }}" \
"${{ secrets.GITLAB_TEST_VARS_URL }}" \
-o ${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml; then
echo "✅ Successfully downloaded test configuration"
else
echo "❌ Failed to download test configuration from GitLab"
exit 1
fi
else
echo "⚠️ GitLab secrets not available (forked PR or secrets not configured)"
echo "💡 Attempting to use fallback test-variables.yml if available"
# Check if test-variables.yml already exists (e.g., from a previous step or cached)
if [ ! -f "${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml" ]; then
echo "❌ No test-variables.yml found and secrets not available"
echo "💡 This workflow requires GITLAB_TOKEN and GITLAB_TEST_VARS_URL secrets"
exit 1
else
echo "✅ Using existing test-variables.yml file"
fi
fi
# Update ODH_DASHBOARD_URL if secret is not available (for fork PRs)
if [ -z "${{ secrets.ODH_DASHBOARD_URL }}" ]; then
echo "⚠️ ODH_DASHBOARD_URL secret not available (forked PR) - using localhost"
sed -i.bak "s|ODH_DASHBOARD_URL:.*|ODH_DASHBOARD_URL: http://localhost:${WEBPACK_PORT}|" ${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml || true
fi
- name: Login to OpenShift cluster
run: |
# For forked PRs, skip OpenShift login since secrets are not available
if [ -z "${{ secrets.OC_SERVER }}" ]; then
echo "⚠️ OpenShift secrets not available (forked PR) - skipping cluster login"
exit 0
fi
# Read credentials from downloaded test-variables.yml
TEST_VARS_FILE="${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml"
# Extract OC_USERNAME from OCP_ADMIN_USER section
OC_USERNAME=$(grep -A 10 "^OCP_ADMIN_USER:" "$TEST_VARS_FILE" | grep "USERNAME:" | head -1 | sed 's/.*USERNAME: //' | tr -d ' ')
# Extract OC_PASSWORD from OCP_ADMIN_USER section
OC_PASSWORD=$(grep -A 10 "^OCP_ADMIN_USER:" "$TEST_VARS_FILE" | grep "PASSWORD:" | head -1 | sed 's/.*PASSWORD: //' | tr -d ' ')
# Mask sensitive data in logs
echo "::add-mask::$OC_PASSWORD"
echo "::add-mask::$OC_USERNAME"
echo "Logging in to OpenShift cluster..."
oc login -u "$OC_USERNAME" -p "$OC_PASSWORD" --server="${{ secrets.OC_SERVER }}" --insecure-skip-tls-verify > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "✅ Successfully logged in to OpenShift cluster (dash-e2e-odh)"
else
echo "❌ Failed to login to OpenShift cluster"
exit 1
fi
# Export OpenShift configuration for Cypress tests
export KUBECONFIG="$HOME/.kube/config"
echo "KUBECONFIG=$KUBECONFIG" >> $GITHUB_ENV
- name: Override namespace values from secrets
run: |
TEST_VARS_FILE="${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml"
echo "Overriding RHOAI namespaces with ODH namespaces from secrets..."
# Helper function to set or update a key-value pair in YAML
set_yaml_value() {
local key="$1"
local value="$2"
if [ -z "$value" ]; then
return
fi
# Remove leading/trailing whitespace from value
value=$(echo "$value" | xargs)
if grep -q "^${key}:" "$TEST_VARS_FILE"; then
sed -i "s|^${key}:.*|${key}: ${value}|" "$TEST_VARS_FILE"
else
echo "${key}: ${value}" >> "$TEST_VARS_FILE"
fi
}
# Read ODH namespace values from GitHub secret
# This overrides the default RHOAI namespaces in test-variables.yml
ODH_NAMESPACES="${{ secrets.ODH_NAMESPACES }}"
if [ -z "$ODH_NAMESPACES" ]; then
echo "⚠️ ODH_NAMESPACES secret not set, skipping namespace override"
exit 0
fi
# Mask namespace values in logs
echo "::add-mask::$ODH_NAMESPACES"
echo "📝 Overriding namespaces with ODH values..."
# Parse comma-separated values from secret
# Format: OPERATOR_NAMESPACE,APPLICATIONS_NAMESPACE,NOTEBOOKS_NAMESPACE,OPERATOR_NAME,ODH_DASHBOARD_PROJECT_NAME
IFS=',' read -r OPERATOR_NS APPLICATIONS_NS NOTEBOOKS_NS OPERATOR_NAME PROJECT_NAME <<< "$ODH_NAMESPACES"
# Override RHOAI namespaces with ODH values
set_yaml_value "PRODUCT" "ODH"
set_yaml_value "OPERATOR_NAMESPACE" "$OPERATOR_NS"
set_yaml_value "APPLICATIONS_NAMESPACE" "$APPLICATIONS_NS"
set_yaml_value "MONITORING_NAMESPACE" "$APPLICATIONS_NS"
set_yaml_value "NOTEBOOKS_NAMESPACE" "$NOTEBOOKS_NS"
set_yaml_value "OPERATOR_NAME" "$OPERATOR_NAME"
set_yaml_value "ODH_DASHBOARD_PROJECT_NAME" "$PROJECT_NAME"
echo "✅ Overridden namespaces with ODH values"
echo "Namespace configuration updated"
- name: Set test configuration
run: |
export CY_TEST_CONFIG="${{ github.workspace }}/frontend/src/__tests__/cypress/test-variables.yml"
echo "CY_TEST_CONFIG=$CY_TEST_CONFIG" >> $GITHUB_ENV
echo "✅ Test configuration set (Cypress will connect to localhost:${WEBPACK_PORT})"
- name: Start Cypress Server
run: |
echo "🧹 Cleaning up port ${WEBPACK_PORT}..."
PORT_INFO_DIR="/tmp/gha-ports"
PORT_INFO_FILE="$PORT_INFO_DIR/port-${WEBPACK_PORT}.run_id"
CURRENT_RUN_ID="${{ github.run_id }}"
# Check if port is in use
if lsof -i:${WEBPACK_PORT} > /dev/null 2>&1; then
# Check if there's a run_id file for this port
if [ -f "$PORT_INFO_FILE" ]; then
PORT_OWNER_RUN_ID=$(cat "$PORT_INFO_FILE")
if [ "$PORT_OWNER_RUN_ID" != "$CURRENT_RUN_ID" ]; then
echo "⚠️ Port ${WEBPACK_PORT} is owned by different run_id: $PORT_OWNER_RUN_ID"
echo "⚠️ This port is in use by another workflow run - will not kill it"
# Try to find an alternative port
for alt_port in $(seq $((WEBPACK_PORT + 5)) $((WEBPACK_PORT + 50)) 5); do
if ! lsof -i:${alt_port} > /dev/null 2>&1; then
WEBPACK_PORT=$alt_port
PORT_INFO_FILE="$PORT_INFO_DIR/port-${WEBPACK_PORT}.run_id"
echo "✅ Found alternative port: ${WEBPACK_PORT}"
break
fi
done
else
echo "✅ Port ${WEBPACK_PORT} is owned by this run - safe to clean up"
fi
else
# No run_id file - check if process is from a recent GitHub Actions run
PORT_PID=$(lsof -ti:${WEBPACK_PORT} 2>/dev/null | head -1)
if [ -n "$PORT_PID" ]; then
# Check if process is from a GitHub Actions workflow
if ps -p "$PORT_PID" -o command= 2>/dev/null | grep -q "webpack.*serve\|node.*40[0-9][0-9]"; then
echo "⚠️ Port ${WEBPACK_PORT} in use by potential GHA process (PID: $PORT_PID)"
echo "⚠️ Being cautious - will not kill without run_id confirmation"
# Find alternative port
for alt_port in $(seq $((WEBPACK_PORT + 5)) $((WEBPACK_PORT + 50)) 5); do
if ! lsof -i:${alt_port} > /dev/null 2>&1; then
WEBPACK_PORT=$alt_port
PORT_INFO_FILE="$PORT_INFO_DIR/port-${WEBPACK_PORT}.run_id"
echo "✅ Found alternative port: ${WEBPACK_PORT}"
break
fi
done
else
echo "⚠️ Port ${WEBPACK_PORT} in use by non-GHA process - cleaning up"
kill -9 "$PORT_PID" 2>/dev/null || true
fi
fi
fi
fi
# Verify port is free with retry logic
RETRY_COUNT=0
while lsof -i:${WEBPACK_PORT} > /dev/null 2>&1; do
RETRY_COUNT=$((RETRY_COUNT + 1))
if [ $RETRY_COUNT -gt 10 ]; then
echo "❌ Port ${WEBPACK_PORT} still in use after cleanup!"
lsof -i:${WEBPACK_PORT}
exit 1
fi
echo "⏳ Retrying cleanup... (attempt $RETRY_COUNT/10)"
sleep 2
done
# Claim the port with our run_id
mkdir -p "$PORT_INFO_DIR"
echo "$CURRENT_RUN_ID" > "$PORT_INFO_FILE"
echo "WEBPACK_PORT=$WEBPACK_PORT" >> $GITHUB_ENV
echo "PORT_INFO_FILE=$PORT_INFO_FILE" >> $GITHUB_ENV
echo "✅ Port ${WEBPACK_PORT} is free and claimed by run_id: $CURRENT_RUN_ID"
echo "🚀 Starting webpack dev server on port ${WEBPACK_PORT} (dash-e2e-odh)..."
# Start webpack and filter sensitive output
cd frontend && ODH_PORT=${WEBPACK_PORT} npm run start:dev:ext > /tmp/webpack_${WEBPACK_PORT}.log 2>&1 &
SERVER_PID=$!
echo "SERVER_PID=$SERVER_PID" >> $GITHUB_ENV
echo "$SERVER_PID" > "$PORT_INFO_DIR/port-${WEBPACK_PORT}.pid"
# Give server time to initialize
sleep 20
# Show filtered webpack status (hide sensitive cluster URLs)
if [ -f /tmp/webpack_${WEBPACK_PORT}.log ]; then
tail -20 /tmp/webpack_${WEBPACK_PORT}.log | \
grep -v "Dashboard host:" | \
grep -v "Proxy created:" | \
grep -v "Logged in as user:" | \
grep -v "Using project:" || true
fi
- name: Wait for Server Ready
run: |
echo "⏳ Waiting for localhost:${WEBPACK_PORT} to be ready..."
npx wait-on http://localhost:${WEBPACK_PORT} --timeout 120000
# Verify the application loads with dashboard content
for i in {1..10}; do
if curl -s -f http://localhost:${WEBPACK_PORT}/ | grep -q "Data Science Projects\|ODH\|Open Data Hub\|Dashboard"; then
echo "✅ Server is ready and application is loaded!"
break
fi
if [ $i -lt 10 ]; then
echo "⏳ Waiting for application to load... (attempt $i/10)"
sleep 8
else
echo "❌ Application failed to load properly after 10 attempts"
exit 1
fi
done
- name: Run E2E Tests
run: |
cd frontend
echo "🧪 Running E2E tests for ${{ matrix.test-tag }}..."
echo "🚀 Running tests against live dashboard on port ${WEBPACK_PORT}"
export CY_RESULTS_DIR="${{ github.workspace }}/frontend/src/__tests__/cypress/results/${{ matrix.test-tag }}"
mkdir -p "$CY_RESULTS_DIR"
# Run Cypress tests with Chrome browser
# Videos are recorded and automatically uploaded on failure (default Cypress behavior)
BASE_URL=http://localhost:${WEBPACK_PORT} npm run cypress:run:chrome -- \
--env skipTags="@Bug @Maintain @NonConcurrent",grepTags="${{ matrix.test-tag }}",grepFilterSpecs=true \
--config video=true,screenshotsFolder="$CY_RESULTS_DIR/screenshots",videosFolder="$CY_RESULTS_DIR/videos"
- name: Test tag name
if: ${{ always() }}
run: |
TEST_TAG_NAME=$(echo '${{ matrix.test-tag }}' | tr '/' '_' | tr '@' '_')
echo "TEST_TAG_NAME=$TEST_TAG_NAME" >> $GITHUB_ENV
- name: Upload test results
if: always()
uses: actions/upload-artifact@v4
with:
name: e2e-test-results-${{ matrix.test-tag }}
path: |
frontend/src/__tests__/cypress/results/
frontend/src/__tests__/cypress/videos/
frontend/src/__tests__/cypress/screenshots/
frontend/src/__tests__/cypress/coverage/
retention-days: 7
- name: Log test completion
if: always()
run: |
echo "🏁 E2E Test completed!"
echo "Status: ${{ job.status }}"
echo "Event: ${{ github.event_name }}"
echo "Branch: ${{ github.ref_name }}"
echo "Test Tag: ${{ matrix.test-tag }}"
echo "Commit: ${{ github.sha }}"
echo "Run ID: ${{ github.run_id }}"
echo ""
echo "📊 Test artifacts uploaded to:"
echo "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
cleanup-server:
needs: [e2e-tests]
runs-on: self-hosted
if: ${{ always() && needs.e2e-tests.result != 'skipped' }}
steps:
- name: Stop Cypress Servers
run: |
echo "🛑 Stopping webpack dev server for run_id: ${{ github.run_id }}..."
PORT_INFO_DIR="/tmp/gha-ports"
CURRENT_RUN_ID="${{ github.run_id }}"
KILLED_COUNT=0
# Find all port files owned by this run_id
if [ -d "$PORT_INFO_DIR" ]; then
for port_file in "$PORT_INFO_DIR"/port-*.run_id; do
if [ -f "$port_file" ]; then
PORT_OWNER_RUN_ID=$(cat "$port_file")
if [ "$PORT_OWNER_RUN_ID" = "$CURRENT_RUN_ID" ]; then
# Extract port number from filename
PORT=$(basename "$port_file" | sed 's/port-\([0-9]*\)\.run_id/\1/')
PID_FILE="$PORT_INFO_DIR/port-${PORT}.pid"
# Kill process if PID file exists
if [ -f "$PID_FILE" ]; then
PID=$(cat "$PID_FILE")
if ps -p "$PID" > /dev/null 2>&1; then
echo "🛑 Killing process $PID on port $PORT (run_id: $CURRENT_RUN_ID)"
# Kill the main process and all its children (including Chrome processes spawned by Cypress)
pkill -P "$PID" 2>/dev/null || true
kill "$PID" 2>/dev/null || true
KILLED_COUNT=$((KILLED_COUNT + 1))
fi
fi
# Also kill any process on this port (double-check)
PORT_PID=$(lsof -ti:${PORT} 2>/dev/null | head -1)
if [ -n "$PORT_PID" ]; then
echo "🛑 Killing process $PORT_PID on port $PORT"
# Kill children processes too
pkill -P "$PORT_PID" 2>/dev/null || true
kill "$PORT_PID" 2>/dev/null || true
fi
# Clean up any orphaned Chrome processes that might be related to Cypress
ALL_PORT_PIDS=$(lsof -ti:${PORT} 2>/dev/null || true)
if [ -n "$ALL_PORT_PIDS" ]; then
for port_pid in $ALL_PORT_PIDS; do
# Check if this is a Chrome/Chromium process
if ps -p "$port_pid" -o comm= 2>/dev/null | grep -qE "chrome|chromium"; then
echo "🛑 Killing Chrome process $port_pid (using port $PORT)"
# Kill the Chrome process and all its children
pkill -P "$port_pid" 2>/dev/null || true
kill "$port_pid" 2>/dev/null || true
fi
done
fi
# Also look for Chrome processes that are children of our webpack process
if [ -n "$PID" ] && ps -p "$PID" > /dev/null 2>&1; then
CHROME_CHILDREN=$(pgrep -P "$PID" -f "chrome|chromium" 2>/dev/null || true)
if [ -n "$CHROME_CHILDREN" ]; then
echo "🧹 Cleaning up Chrome processes spawned by webpack (PID $PID)..."
for chrome_pid in $CHROME_CHILDREN; do
echo "🛑 Killing Chrome process $chrome_pid (child of webpack PID $PID)"
pkill -P "$chrome_pid" 2>/dev/null || true
kill "$chrome_pid" 2>/dev/null || true
done
fi
fi
# Fallback: Look for Chrome processes with Cypress-specific flags that might be orphaned
# These are Chrome processes that Cypress spawned but might not be directly related to our port
# We only kill them if they're accessing our port to be safe
ORPHANED_CHROME=$(pgrep -f "chrome.*--test-type|chromium.*--test-type" 2>/dev/null || true)
if [ -n "$ORPHANED_CHROME" ]; then
for chrome_pid in $ORPHANED_CHROME; do
# Only kill if it's accessing our port (to avoid killing other users' Chrome processes)
if lsof -p "$chrome_pid" 2>/dev/null | grep -q ":${PORT}"; then
echo "🛑 Killing orphaned Chrome process $chrome_pid (accessing port $PORT)"
pkill -P "$chrome_pid" 2>/dev/null || true
kill "$chrome_pid" 2>/dev/null || true
fi
done
fi
# Clean up port info files
rm -f "$port_file" "$PID_FILE"
fi
fi
done
fi
# Clean up any stale port files older than 24 hours
find "$PORT_INFO_DIR" -name "*.run_id" -mtime +1 -delete 2>/dev/null || true
find "$PORT_INFO_DIR" -name "*.pid" -mtime +1 -delete 2>/dev/null || true
if [ $KILLED_COUNT -eq 0 ]; then
echo "✅ No processes found for run_id: $CURRENT_RUN_ID"
else
echo "✅ Cleaned up $KILLED_COUNT process(es) for run_id: $CURRENT_RUN_ID"
fi