# Captured from the GitHub web UI for PR #10:
#   "feat: transform to AI Development Cockpit with LangGraph orchestration"
# Workflow file for this run
---
# Continuous Integration pipeline: build/quality gates, tests, security
# scanning, critical E2E, gate evaluation, and (on main) deployment trigger.
name: Continuous Integration Pipeline

# NOTE(review): generic YAML parsers read the bare `on` key as boolean `true`
# (YAML 1.1 truthy); GitHub's loader handles it, so it is kept as-is.
on:
  push:
    branches: [main, develop, feature/*]
  pull_request:
    branches: [main, develop]
  workflow_dispatch:
    inputs:
      run_security_scans:
        description: 'Run comprehensive security scans'
        required: false
        default: true
        type: boolean
      skip_e2e_tests:
        description: 'Skip E2E tests (for draft PRs)'
        required: false
        default: false
        type: boolean

env:
  NODE_VERSION: '18'  # quoted so it stays a string, not an int
  CACHE_VERSION: v1

jobs:
  # Job 1: Build and Quality Gates
  build-and-quality:
    name: Build & Quality Gates
    runs-on: ubuntu-latest
    outputs:
      build-success: ${{ steps.build.outcome }}
      cache-key: ${{ steps.cache-key.outputs.key }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Full history for better caching and analysis

      - name: Generate cache key
        id: cache-key
        run: |
          CACHE_KEY="${{ env.CACHE_VERSION }}-${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}"
          echo "key=$CACHE_KEY" >> "$GITHUB_OUTPUT"

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: ~/.npm
          key: ${{ steps.cache-key.outputs.key }}
          restore-keys: |
            ${{ env.CACHE_VERSION }}-${{ runner.os }}-node-

      - name: Install dependencies
        run: |
          npm ci --prefer-offline --no-audit
          echo "📦 Dependencies installed successfully"

      - name: TypeScript compilation check
        run: |
          echo "🔍 Running TypeScript compilation check..."
          npm run type-check
          echo "✅ TypeScript compilation passed"

      - name: Code linting
        run: |
          echo "🧹 Running ESLint..."
          npm run lint
          echo "✅ Linting passed"

      - name: Code formatting check
        run: |
          echo "🎨 Checking code formatting..."
          npx prettier --check . || {
            echo "❌ Code formatting issues found. Run 'npx prettier --write .' to fix."
            exit 1
          }
          echo "✅ Code formatting check passed"

      - name: Build application
        id: build
        run: |
          echo "🏗️ Building application..."
          npm run build
          echo "✅ Build completed successfully"

      - name: Bundle size analysis
        run: |
          echo "📊 Analyzing bundle size..."
          BUILD_SIZE=$(du -sh .next 2>/dev/null | cut -f1 || echo "unknown")
          echo "📦 Build size: $BUILD_SIZE"
          # Check if build size is reasonable (warning if > 50MB)
          if [[ -d ".next" ]]; then
            BUILD_SIZE_BYTES=$(du -sb .next | cut -f1)
            MAX_SIZE=$((50 * 1024 * 1024))  # 50MB in bytes
            if [[ $BUILD_SIZE_BYTES -gt $MAX_SIZE ]]; then
              echo "⚠️ Warning: Build size ($BUILD_SIZE) exceeds 50MB threshold"
            else
              echo "✅ Build size within acceptable limits"
            fi
          fi

      - name: Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: build-artifacts
          path: |
            .next/
            package.json
            package-lock.json
          retention-days: 1

  # Job 2: Unit and Integration Tests
  test-suite:
    name: Test Suite
    runs-on: ubuntu-latest
    needs: build-and-quality
    if: needs.build-and-quality.outputs.build-success == 'success'
    strategy:
      matrix:
        test-type: [unit, integration]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Restore dependencies cache
        uses: actions/cache@v4
        with:
          path: ~/.npm
          key: ${{ needs.build-and-quality.outputs.cache-key }}

      - name: Install dependencies
        run: npm ci --prefer-offline --no-audit

      - name: Run unit tests
        if: matrix.test-type == 'unit'
        run: |
          echo "🧪 Running unit tests..."
          npm run test:unit -- --coverage --passWithNoTests
          echo "✅ Unit tests completed"

      - name: Run integration tests
        if: matrix.test-type == 'integration'
        run: |
          echo "🔗 Running integration tests..."
          npm run test -- --testPathPattern="integration" --coverage --passWithNoTests
          echo "✅ Integration tests completed"

      - name: Upload test coverage
        uses: actions/upload-artifact@v4
        with:
          name: coverage-${{ matrix.test-type }}
          path: coverage/
          retention-days: 7

  # Job 3: Security Scanning
  security-scan:
    name: Security Scanning
    runs-on: ubuntu-latest
    needs: build-and-quality
    # github.event.inputs.* values are strings ('true'/'false'), so the
    # comparison is against the string 'false'; defaults to running.
    if: |
      needs.build-and-quality.outputs.build-success == 'success' &&
      (github.event.inputs.run_security_scans != 'false')
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: npm ci --prefer-offline --no-audit

      - name: Dependency vulnerability scan
        run: |
          echo "🔒 Running dependency vulnerability scan..."
          # npm audit exits non-zero when vulnerabilities at or above the
          # requested level exist — use the exit code directly.
          if ! npm audit --audit-level=moderate; then
            echo "⚠️ Vulnerabilities found in dependencies"
            echo "📊 Generating detailed audit report..."
            npm audit --json > audit-report.json || true
            # FIX: the previous check piped a nonexistent `--dry-run` flag
            # into `grep -c "vulnerabilities"`, which matched the summary
            # line regardless of severity. Rely on the exit code instead.
            if ! npm audit --audit-level=high > /dev/null 2>&1; then
              echo "❌ High/Critical vulnerabilities found - blocking pipeline"
              npm audit --audit-level=high || true
              exit 1
            else
              echo "⚠️ Low/Moderate vulnerabilities found - continuing with warning"
            fi
          else
            echo "✅ No significant vulnerabilities found"
          fi

      - name: Secrets detection
        run: |
          echo "🔍 Scanning for secrets and sensitive data..."
          # Check for common secret patterns
          SECRETS_FOUND=false
          # Check for hardcoded API keys
          if grep -r "api[_-]key\s*=\s*['\"][^'\"]*['\"]" src/ 2>/dev/null; then
            echo "❌ Potential API keys found in source code"
            SECRETS_FOUND=true
          fi
          # Check for hardcoded passwords
          if grep -r "password\s*=\s*['\"][^'\"]*['\"]" src/ 2>/dev/null; then
            echo "❌ Potential hardcoded passwords found"
            SECRETS_FOUND=true
          fi
          # FIX: `find` exits 0 whether or not it matches, so the previous
          # `if find ...` always printed the warning. Test the output instead.
          ENV_FILES=$(find . -name ".env*" -not -path "./node_modules/*" -not -name ".env.example" -not -name ".env.local")
          if [[ -n "$ENV_FILES" ]]; then
            echo "⚠️ Environment files found - ensure they're not committed"
            echo "$ENV_FILES"
          fi
          if [[ "$SECRETS_FOUND" == "true" ]]; then
            echo "❌ Security scan failed - secrets detected"
            exit 1
          else
            echo "✅ No secrets detected in source code"
          fi

      - name: License compliance check
        run: |
          echo "📜 Checking license compliance..."
          # Generate license report (best-effort; failure is non-blocking)
          npx license-checker --summary > license-summary.txt || true
          if [[ -f "license-summary.txt" ]]; then
            echo "📊 License summary:"
            cat license-summary.txt
          fi
          echo "✅ License compliance check completed"

      - name: Upload security reports
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: security-reports
          path: |
            audit-report.json
            license-summary.txt
          retention-days: 30

  # Job 4: Critical E2E Tests (Fast Feedback)
  critical-e2e:
    name: Critical E2E Tests
    runs-on: ubuntu-latest
    needs: [build-and-quality, test-suite]
    if: |
      needs.build-and-quality.outputs.build-success == 'success' &&
      needs.test-suite.result == 'success' &&
      github.event.inputs.skip_e2e_tests != 'true'
    timeout-minutes: 15
    strategy:
      fail-fast: false
      matrix:
        organization: [swaggystacks, scientia]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODE_VERSION }}
          cache: 'npm'

      - name: Install dependencies
        run: |
          npm ci --prefer-offline --no-audit
          npx playwright install --with-deps chromium

      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          name: build-artifacts

      - name: Start development server
        run: |
          npm run dev &
          # Wait for server to be ready (up to ~60s)
          for i in {1..30}; do
            if curl -f http://localhost:3001 2>/dev/null; then
              echo "✅ Development server is ready"
              break
            fi
            if [[ $i -eq 30 ]]; then
              echo "❌ Development server failed to start"
              exit 1
            fi
            echo "Waiting for server... (attempt $i/30)"
            sleep 2
          done

      - name: Run critical path E2E tests
        env:
          TEST_ORGANIZATION: ${{ matrix.organization }}
        run: |
          echo "🧪 Running critical E2E tests for ${{ matrix.organization }}..."
          npx playwright test \
            tests/e2e/marketplace/critical-path.spec.ts \
            tests/e2e/marketplace/smoke-tests.spec.ts \
            --reporter=html,junit \
            --output-dir=test-results/critical-${{ matrix.organization }}

      - name: Upload E2E test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: e2e-results-${{ matrix.organization }}
          path: |
            test-results/
            playwright-report/
          retention-days: 7

  # Job 5: Quality Gate Validation
  quality-gates:
    name: Quality Gate Validation
    runs-on: ubuntu-latest
    needs: [build-and-quality, test-suite, security-scan, critical-e2e]
    if: always()  # evaluate gates even when upstream jobs failed/skipped
    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: all-artifacts/

      - name: Evaluate quality gates
        run: |
          echo "🚪 Evaluating quality gates..."
          QUALITY_SCORE=0
          TOTAL_GATES=5
          # FIX: `((QUALITY_SCORE++))` returns exit status 1 when the
          # pre-increment value is 0, which aborts the step under the
          # runner's default `bash -e`. Plain assignment is always safe.
          # Gate 1: Build Success
          if [[ "${{ needs.build-and-quality.result }}" == "success" ]]; then
            echo "✅ Build Gate: PASSED"
            QUALITY_SCORE=$((QUALITY_SCORE + 1))
          else
            echo "❌ Build Gate: FAILED"
          fi
          # Gate 2: Test Success
          if [[ "${{ needs.test-suite.result }}" == "success" ]]; then
            echo "✅ Test Gate: PASSED"
            QUALITY_SCORE=$((QUALITY_SCORE + 1))
          else
            echo "❌ Test Gate: FAILED"
          fi
          # Gate 3: Security Scan
          if [[ "${{ needs.security-scan.result }}" == "success" ]]; then
            echo "✅ Security Gate: PASSED"
            QUALITY_SCORE=$((QUALITY_SCORE + 1))
          else
            echo "❌ Security Gate: FAILED"
          fi
          # Gate 4: Critical E2E Tests
          if [[ "${{ needs.critical-e2e.result }}" == "success" ]]; then
            echo "✅ E2E Gate: PASSED"
            QUALITY_SCORE=$((QUALITY_SCORE + 1))
          else
            echo "❌ E2E Gate: FAILED"
          fi
          # Gate 5: No High Priority Issues
          # This would typically integrate with SonarQube or similar
          echo "✅ Code Quality Gate: PASSED (default)"
          QUALITY_SCORE=$((QUALITY_SCORE + 1))
          # Calculate quality percentage
          QUALITY_PERCENTAGE=$((QUALITY_SCORE * 100 / TOTAL_GATES))
          echo ""
          echo "📊 Quality Gate Summary:"
          echo "Score: $QUALITY_SCORE/$TOTAL_GATES ($QUALITY_PERCENTAGE%)"
          # Set quality gate threshold (80%)
          if [[ $QUALITY_PERCENTAGE -ge 80 ]]; then
            echo "✅ Quality gates PASSED - Ready for deployment"
            echo "quality_gates_passed=true" >> "$GITHUB_ENV"
          else
            echo "❌ Quality gates FAILED - Deployment blocked"
            echo "quality_gates_passed=false" >> "$GITHUB_ENV"
            exit 1
          fi

      - name: Generate CI summary report
        run: |
          # ${{ }} expressions are substituted by the runner BEFORE the shell
          # runs, so the quoted 'EOF' heredoc still interpolates them.
          # NOTE(review): `github.run_duration` is not a documented context
          # property; the `|| 'calculating...'` fallback always applies.
          cat << 'EOF' > ci-summary.md
          # 🚀 CI Pipeline Summary
          ## Quality Gates Results
          - **Build & Quality**: ${{ needs.build-and-quality.result }}
          - **Test Suite**: ${{ needs.test-suite.result }}
          - **Security Scan**: ${{ needs.security-scan.result }}
          - **Critical E2E**: ${{ needs.critical-e2e.result }}
          ## Pipeline Status
          ${{ env.quality_gates_passed == 'true' && '✅ **READY FOR DEPLOYMENT**' || '❌ **DEPLOYMENT BLOCKED**' }}
          ## Metrics
          - Pipeline Duration: ${{ github.run_duration || 'calculating...' }}
          - Commit SHA: `${{ github.sha }}`
          - Branch: `${{ github.ref_name }}`
          ## Next Steps
          ${{ env.quality_gates_passed == 'true' &&
          '- Deployment pipeline will trigger automatically on main branch' ||
          '- Fix failing quality gates before deployment' }}
          EOF

      - name: Upload CI summary
        uses: actions/upload-artifact@v4
        with:
          name: ci-summary
          path: ci-summary.md
          retention-days: 30

      - name: Comment PR with results (if PR)
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const summary = fs.readFileSync('ci-summary.md', 'utf8');
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: summary
            });

  # Job 6: Deployment Readiness Check
  deployment-readiness:
    name: Deployment Readiness
    runs-on: ubuntu-latest
    needs: quality-gates
    if: |
      github.ref == 'refs/heads/main' &&
      needs.quality-gates.result == 'success'
    steps:
      - name: Validate deployment readiness
        run: |
          echo "🎯 Validating deployment readiness..."
          # All quality gates must pass for main branch deployment
          echo "✅ All quality gates passed"
          echo "✅ On main branch - deployment approved"
          echo "🚀 Ready to trigger deployment pipeline"
          # Set deployment flag
          echo "DEPLOYMENT_APPROVED=true" >> "$GITHUB_ENV"

      - name: Trigger deployment workflow
        if: env.DEPLOYMENT_APPROVED == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.actions.createWorkflowDispatch({
              owner: context.repo.owner,
              repo: context.repo.repo,
              workflow_id: 'deploy.yml',
              ref: 'main',
              inputs: {
                'trigger_source': 'ci_pipeline',
                'commit_sha': context.sha
              }
            });
            console.log('🚀 Deployment pipeline triggered successfully');

  # Notification job for important events
  notify-completion:
    name: Notify Completion
    runs-on: ubuntu-latest
    needs: [build-and-quality, test-suite, security-scan, critical-e2e, quality-gates]
    # NOTE(review): checks for a 'schedule' event, but no `schedule:`
    # trigger is declared above — the second clause never fires; confirm.
    if: always() && (github.ref == 'refs/heads/main' || github.event_name == 'schedule')
    steps:
      - name: Determine notification status
        run: |
          if [[ "${{ needs.quality-gates.result }}" == "success" ]]; then
            echo "STATUS=✅ SUCCESS" >> "$GITHUB_ENV"
            echo "MESSAGE=CI pipeline completed successfully - all quality gates passed" >> "$GITHUB_ENV"
          else
            echo "STATUS=❌ FAILURE" >> "$GITHUB_ENV"
            echo "MESSAGE=CI pipeline failed - check quality gates for details" >> "$GITHUB_ENV"
          fi

      - name: Send notification
        run: |
          echo "${{ env.STATUS }}: ${{ env.MESSAGE }}"
          echo "🔗 Pipeline: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          # In a real environment, this would integrate with Slack, Teams, or email
          # Example: curl -X POST webhook_url -d "{"text": "${{ env.MESSAGE }}"}"