# E2E Testing Pipeline — workflow file for run #34
# (GitHub web-UI text from the run page converted to comments so this file
# parses as valid YAML.)
name: E2E Testing Pipeline

# Triggers:
#   - push / pull_request: standard CI runs
#   - schedule: nightly comprehensive run (chaos + performance)
#   - workflow_dispatch: manual run with suite / environment / intensity inputs
on:
  push:
    branches: [ main, develop, 'feature/*' ]
  pull_request:
    branches: [ main, develop ]
  schedule:
    # Run nightly at 2 AM UTC for comprehensive testing
    - cron: '0 2 * * *'
  workflow_dispatch:
    inputs:
      test_suite:
        description: 'Test suite to run'
        required: true
        default: 'all'
        type: choice
        options:
          - all
          - marketplace
          - performance
          - chaos
          - resilience
      environment:
        description: 'Environment to test against'
        required: true
        default: 'development'
        type: choice
        options:
          - development
          - staging
          - production
      chaos_intensity:
        description: 'Chaos testing intensity'
        required: false
        default: 'medium'
        type: choice
        options:
          - low
          - medium
          - high
          - extreme

# Shared tool versions; quoted so they stay strings, not numbers.
env:
  NODE_VERSION: '18'
  PLAYWRIGHT_VERSION: '1.40.0'
jobs:
  # Job 1: Basic validation and setup.
  # Exposes three outputs consumed by downstream jobs: a dynamic test matrix
  # and two booleans gating the optional chaos / performance suites.
  setup-and-validate:
    name: Setup and Validate Environment
    runs-on: ubuntu-latest
    outputs:
      test-matrix: ${{ steps.test-matrix.outputs.matrix }}
      should-run-chaos: ${{ steps.conditions.outputs.chaos }}
      should-run-performance: ${{ steps.conditions.outputs.performance }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: |
          npm ci
          npx playwright install --with-deps

      - name: Validate test environment
        run: |
          npm run type-check
          npm run lint
          npx playwright --version

      - name: Build application
        run: npm run build

      - name: Determine test conditions
        id: conditions
        run: |
          # Decide which optional suites run, based on the trigger:
          #   schedule (nightly)  -> everything
          #   workflow_dispatch   -> honour the test_suite input
          #   push / pull_request -> performance only, no chaos
          if [[ "${{ github.event_name }}" == "schedule" ]]; then
            echo "chaos=true" >> "$GITHUB_OUTPUT"
            echo "performance=true" >> "$GITHUB_OUTPUT"
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            SUITE="${{ github.event.inputs.test_suite }}"
            if [[ "$SUITE" == "all" || "$SUITE" == "chaos" ]]; then
              echo "chaos=true" >> "$GITHUB_OUTPUT"
            else
              echo "chaos=false" >> "$GITHUB_OUTPUT"
            fi
            if [[ "$SUITE" == "all" || "$SUITE" == "performance" ]]; then
              echo "performance=true" >> "$GITHUB_OUTPUT"
            else
              echo "performance=false" >> "$GITHUB_OUTPUT"
            fi
          else
            echo "chaos=false" >> "$GITHUB_OUTPUT"
            echo "performance=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Generate test matrix
        id: test-matrix
        run: |
          # Build the dynamic test matrix as SINGLE-LINE JSON fragments.
          # FIX: the previous version appended multi-line string fragments,
          # which embedded newlines in $MATRIX; a multi-line value written as
          # "matrix=$MATRIX" to $GITHUB_OUTPUT corrupts the key=value format
          # (multi-line outputs require heredoc delimiter syntax).
          MATRIX='{"include":['
          # Marketplace tests always run, once per organization.
          MATRIX+='{"suite":"marketplace","browser":"chromium","organization":"swaggystacks","timeout":300000},'
          MATRIX+='{"suite":"marketplace","browser":"chromium","organization":"scientia","timeout":300000},'
          # Optional suites, gated by the conditions step above.
          if [[ "${{ steps.conditions.outputs.performance }}" == "true" ]]; then
            MATRIX+='{"suite":"performance","browser":"chromium","organization":"swaggystacks","timeout":600000},'
          fi
          if [[ "${{ steps.conditions.outputs.chaos }}" == "true" ]]; then
            MATRIX+='{"suite":"chaos","browser":"chromium","organization":"swaggystacks","timeout":900000},'
          fi
          # Remove the trailing comma and close the JSON structure.
          MATRIX=${MATRIX%,}
          MATRIX+=']}'
          echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"
          echo "Generated test matrix: $MATRIX"
# Job 2: Core marketplace testing
marketplace-tests:
name: Marketplace Tests (${{ matrix.organization }})
runs-on: ubuntu-latest
needs: setup-and-validate
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
organization: [swaggystacks, scientia]
browser: [chromium, firefox, webkit]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: |
npm ci
npx playwright install --with-deps ${{ matrix.browser }}
- name: Start development server
run: |
npm run dev &
sleep 10
curl -f http://localhost:3001 || exit 1
- name: Run marketplace E2E tests
env:
TEST_ORGANIZATION: ${{ matrix.organization }}
PLAYWRIGHT_BROWSER: ${{ matrix.browser }}
run: |
npx playwright test tests/e2e/marketplace/ \
--reporter=html,junit \
--output-dir=test-results/${{ matrix.organization }}-${{ matrix.browser }}
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: marketplace-results-${{ matrix.organization }}-${{ matrix.browser }}
path: |
test-results/
playwright-report/
retention-days: 7
# Job 3: Performance testing
performance-tests:
name: Performance Tests
runs-on: ubuntu-latest
needs: [setup-and-validate]
if: needs.setup-and-validate.outputs.should-run-performance == 'true'
timeout-minutes: 45
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: |
npm ci
npx playwright install --with-deps chromium
- name: Start development server
run: |
npm run dev &
sleep 15
curl -f http://localhost:3001 || exit 1
- name: Run performance tests
env:
PERFORMANCE_BUDGET_MS: 3000
MEMORY_THRESHOLD_MB: 100
run: |
npx playwright test tests/e2e/performance/ \
--reporter=html,json \
--output-dir=test-results/performance
- name: Analyze performance metrics
run: |
# Extract performance metrics from test results
node -e "
const fs = require('fs');
try {
const results = JSON.parse(fs.readFileSync('test-results/performance/results.json', 'utf8'));
console.log('Performance Test Summary:');
console.log('- Tests run:', results.stats.total);
console.log('- Tests passed:', results.stats.passed);
console.log('- Average duration:', Math.round(results.stats.duration / results.stats.total), 'ms');
} catch (e) {
console.log('No performance results to analyze');
}
"
- name: Upload performance results
uses: actions/upload-artifact@v4
if: always()
with:
name: performance-results
path: |
test-results/performance/
playwright-report/
retention-days: 14
# Job 4: Chaos and resilience testing
chaos-tests:
name: Chaos & Resilience Tests
runs-on: ubuntu-latest
needs: [setup-and-validate]
if: needs.setup-and-validate.outputs.should-run-chaos == 'true'
timeout-minutes: 60
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: |
npm ci
npx playwright install --with-deps chromium
- name: Start development server
run: |
npm run dev &
sleep 15
curl -f http://localhost:3001 || exit 1
- name: Run chaos testing
env:
CHAOS_INTENSITY: ${{ github.event.inputs.chaos_intensity || 'medium' }}
MAX_RECOVERY_TIME_MS: 30000
run: |
npx playwright test tests/e2e/chaos/ \
--reporter=html,json \
--output-dir=test-results/chaos \
--timeout=900000
- name: Analyze resilience metrics
run: |
# Extract resilience metrics
node -e "
const fs = require('fs');
try {
const results = JSON.parse(fs.readFileSync('test-results/chaos/results.json', 'utf8'));
console.log('Chaos Testing Summary:');
console.log('- Scenarios tested:', results.stats.total);
console.log('- Recovery success rate:', Math.round((results.stats.passed / results.stats.total) * 100), '%');
// Check for SLA compliance
if (results.stats.passed / results.stats.total < 0.8) {
console.error('❌ Resilience SLA not met (< 80% success rate)');
process.exit(1);
} else {
console.log('✅ Resilience SLA met (≥ 80% success rate)');
}
} catch (e) {
console.log('No chaos results to analyze');
}
"
- name: Upload chaos test results
uses: actions/upload-artifact@v4
if: always()
with:
name: chaos-results
path: |
test-results/chaos/
playwright-report/
retention-days: 30
# Job 5: Test result aggregation and reporting
aggregate-results:
name: Aggregate Test Results
runs-on: ubuntu-latest
needs: [marketplace-tests, performance-tests, chaos-tests]
if: always()
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download all test artifacts
uses: actions/download-artifact@v4
with:
path: all-test-results/
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
- name: Generate comprehensive test report
run: |
node -e "
const fs = require('fs');
const path = require('path');
console.log('# E2E Test Pipeline Summary');
console.log('');
console.log('## Test Execution Results');
let totalTests = 0;
let totalPassed = 0;
let totalFailed = 0;
// Aggregate results from all test suites
const resultDirs = fs.readdirSync('all-test-results/', { withFileTypes: true })
.filter(dirent => dirent.isDirectory())
.map(dirent => dirent.name);
for (const dir of resultDirs) {
console.log(\`### \${dir.replace(/-/g, ' ').toUpperCase()}\`);
try {
const resultFiles = fs.readdirSync(\`all-test-results/\${dir}/test-results/\`);
console.log(\`- Artifacts found: \${resultFiles.length} files\`);
// Look for JUnit XML files for more detailed analysis
const junitFiles = resultFiles.filter(f => f.endsWith('.xml'));
if (junitFiles.length > 0) {
console.log(\`- JUnit reports: \${junitFiles.length}\`);
}
} catch (e) {
console.log('- No detailed results available');
}
console.log('');
}
console.log('## Pipeline Status');
console.log('- Marketplace Tests: ${{ needs.marketplace-tests.result }}');
console.log('- Performance Tests: ${{ needs.performance-tests.result }}');
console.log('- Chaos Tests: ${{ needs.chaos-tests.result }}');
console.log('');
// Determine overall pipeline result
const results = [
'${{ needs.marketplace-tests.result }}',
'${{ needs.performance-tests.result }}',
'${{ needs.chaos-tests.result }}'
].filter(r => r !== 'skipped');
const hasFailures = results.includes('failure');
const hasCancelled = results.includes('cancelled');
if (hasFailures) {
console.log('❌ **PIPELINE FAILED** - Critical test failures detected');
process.exit(1);
} else if (hasCancelled) {
console.log('⚠️ **PIPELINE INCOMPLETE** - Some tests were cancelled');
} else {
console.log('✅ **PIPELINE PASSED** - All tests completed successfully');
}
" > test-summary.md
- name: Upload aggregated results
uses: actions/upload-artifact@v4
with:
name: pipeline-summary
path: |
test-summary.md
all-test-results/
retention-days: 30
- name: Comment test results on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const summary = fs.readFileSync('test-summary.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `## 🧪 E2E Test Pipeline Results\n\n${summary}`
});
# Job 6: Deployment validation (only on main branch)
deployment-validation:
name: Deployment Validation
runs-on: ubuntu-latest
needs: [marketplace-tests, performance-tests]
if: |
github.ref == 'refs/heads/main' &&
needs.marketplace-tests.result == 'success' &&
(needs.performance-tests.result == 'success' || needs.performance-tests.result == 'skipped')
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Build for production
run: npm run build
- name: Validate deployment readiness
run: |
echo "🚀 Validating deployment readiness..."
# Check if build artifacts exist
if [[ ! -d ".next" ]]; then
echo "❌ Build artifacts not found"
exit 1
fi
# Check build size
BUILD_SIZE=$(du -sh .next | cut -f1)
echo "📦 Build size: $BUILD_SIZE"
# Run basic smoke tests on production build
npm run start &
SERVER_PID=$!
sleep 15
# Test both organization routes
curl -f http://localhost:3000/swaggystacks || exit 1
curl -f http://localhost:3000/scientia || exit 1
curl -f http://localhost:3000/marketplace || exit 1
kill $SERVER_PID
echo "✅ Deployment validation passed"
- name: Set deployment status
run: |
echo "DEPLOYMENT_READY=true" >> $GITHUB_ENV
echo "🎯 Ready for deployment to production"
# Notification job for important events
notify-results:
name: Notify Results
runs-on: ubuntu-latest
needs: [marketplace-tests, performance-tests, chaos-tests, aggregate-results]
if: always() && (github.event_name == 'schedule' || github.ref == 'refs/heads/main')
steps:
- name: Send notification
run: |
# Determine notification level
if [[ "${{ needs.marketplace-tests.result }}" == "failure" ]]; then
LEVEL="🚨 CRITICAL"
MESSAGE="Marketplace tests failed - immediate attention required"
elif [[ "${{ needs.chaos-tests.result }}" == "failure" ]]; then
LEVEL="⚠️ WARNING"
MESSAGE="Resilience tests failed - system may not handle failures gracefully"
elif [[ "${{ needs.performance-tests.result }}" == "failure" ]]; then
LEVEL="📊 PERFORMANCE"
MESSAGE="Performance tests failed - system may not meet SLA requirements"
else
LEVEL="✅ SUCCESS"
MESSAGE="All E2E tests passed successfully"
fi
echo "$LEVEL: $MESSAGE"
echo "GitHub Run: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"