# Unified accessibility scanning workflow.
# (From PR #29: fix critical accessibility workflow errors — missing
# artifact upload and dependencies.)
name: 'Unified Accessibility Scanner'

# Triggers: weekly schedule, manual dispatch (with tunable inputs),
# PRs against main/develop, and pushes to main.
on:
  schedule:
    # Every Monday at 09:00 UTC (weekly).
    - cron: '0 9 * * 1'
  workflow_dispatch:
    inputs:
      target_url:
        description: 'URL to scan (leave empty for default)'
        required: false
        type: string
      standards:
        description: 'Accessibility standards to test'
        required: false
        default: 'WCAG2AA'
        type: choice
        options:
          - 'WCAG2A'
          - 'WCAG2AA'
          - 'WCAG2AAA'
          - 'Section508'
          - 'EN301549'
      environment:
        description: 'Environment to test'
        required: false
        default: 'production'
        type: choice
        options:
          - 'development'
          - 'staging'
          - 'production'
      fail_on_issues:
        description: 'Fail workflow if accessibility issues found'
        required: false
        default: false
        type: boolean
  pull_request:
    branches: [main, develop]
    types: [opened, synchronize, reopened]
  push:
    branches: [main]

# Shared configuration for every job below.
env:
  ACCESSIBILITY_CONFIG_PATH: '.github/accessibility-config.yml'
  DEFAULT_TARGET_URL: 'https://ncaa-d1-softball.netlify.app/'
  NODE_VERSION: '20'
  REPORT_DIR: 'accessibility-reports'
jobs:
  # Job 1: normalize inputs/config into outputs consumed by the other jobs.
  setup:
    name: 'Setup and Configuration'
    runs-on: ubuntu-latest
    outputs:
      target-url: ${{ steps.config.outputs.target-url }}
      standards: ${{ steps.config.outputs.standards }}
      environment: ${{ steps.config.outputs.environment }}
      fail-on-issues: ${{ steps.config.outputs.fail-on-issues }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Parse configuration and inputs
        id: config
        run: |
          echo "Parsing accessibility configuration..."
          # workflow_dispatch inputs win; otherwise fall back to defaults.
          TARGET_URL="${{ github.event.inputs.target_url || env.DEFAULT_TARGET_URL }}"
          STANDARDS="${{ github.event.inputs.standards || 'WCAG2AA' }}"
          ENVIRONMENT="${{ github.event.inputs.environment || 'production' }}"
          FAIL_ON_ISSUES="${{ github.event.inputs.fail_on_issues || 'false' }}"
          # Optional per-environment target URL override from the config file.
          if [ -f "$ACCESSIBILITY_CONFIG_PATH" ]; then
            echo "Loading configuration from $ACCESSIBILITY_CONFIG_PATH"
            # Install yq if not already on the runner.
            if ! command -v yq &> /dev/null; then
              echo "Installing yq..."
              wget -qO /tmp/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
              sudo chmod +x /tmp/yq
              sudo mv /tmp/yq /usr/local/bin/yq || echo "yq installation failed, using defaults"
            fi
            if command -v yq &> /dev/null; then
              # BUGFIX: yq's env() reads the *process environment*, so TARGET_URL
              # must be exported for the `// env(TARGET_URL)` fallback to work;
              # previously it was an unexported shell variable and the fallback
              # silently produced an empty value.
              export TARGET_URL
              case "$ENVIRONMENT" in
                development|staging|production)
                  TARGET_URL=$(yq eval ".environments.${ENVIRONMENT}.target_url // env(TARGET_URL)" "$ACCESSIBILITY_CONFIG_PATH")
                  ;;
              esac
            fi
          fi
          echo "=== Configuration Summary ==="
          echo "Target URL: $TARGET_URL"
          echo "Standards: $STANDARDS"
          echo "Environment: $ENVIRONMENT"
          echo "Fail on Issues: $FAIL_ON_ISSUES"
          # Publish the resolved values as step outputs.
          echo "target-url=$TARGET_URL" >> $GITHUB_OUTPUT
          echo "standards=$STANDARDS" >> $GITHUB_OUTPUT
          echo "environment=$ENVIRONMENT" >> $GITHUB_OUTPUT
          echo "fail-on-issues=$FAIL_ON_ISSUES" >> $GITHUB_OUTPUT
# Job 2: Comprehensive Accessibility Scanning
accessibility-scan:
name: 'Comprehensive Accessibility Scan'
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Create reports directory
run: mkdir -p ${{ env.REPORT_DIR }}
- name: Install accessibility testing tools
run: |
echo "Installing global accessibility tools..."
npm install -g pa11y @axe-core/cli lighthouse
echo "Installing Playwright dependencies..."
npm install @axe-core/playwright --save-dev --no-save
echo "Verifying Playwright installation..."
if npm list @axe-core/playwright >/dev/null 2>&1; then
echo "✅ @axe-core/playwright installed successfully"
else
echo "❌ @axe-core/playwright installation failed"
fi
- name: Install Playwright browsers
run: |
npx playwright install chromium --with-deps || echo "Browser installation failed, continuing with other tools"
- name: Create pa11y configuration file
run: |
cat > pa11y.json << 'EOF'
{
"chromeLaunchConfig": {
"args": [
"--no-sandbox",
"--disable-dev-shm-usage",
"--disable-gpu",
"--headless"
]
},
"timeout": 30000,
"wait": 10000,
"standard": "WCAG2AA",
"runners": ["htmlcs"],
"ignore": []
}
EOF
- name: Run Axe-core accessibility scan
run: |
echo "Running Axe-core scan..."
TARGET_URL="${{ needs.setup.outputs.target-url }}"
# Run axe scan with custom configuration
npx @axe-core/cli --stdout --save ${{ env.REPORT_DIR }}/axe-report.json "$TARGET_URL" || echo "Axe scan completed with issues"
# Create metrics file
if [ -f "${{ env.REPORT_DIR }}/axe-report.json" ]; then
echo "Processing axe results..."
node -e "
const fs = require('fs');
const data = JSON.parse(fs.readFileSync('${{ env.REPORT_DIR }}/axe-report.json', 'utf8'));
const violations = data.violations || [];
const metrics = {
total: violations.length,
byImpact: violations.reduce((acc, v) => { acc[v.impact] = (acc[v.impact] || 0) + 1; return acc; }, {}),
colorContrastFailures: violations.filter(v => v.id === 'color-contrast').length,
passes: (data.passes || []).length,
incomplete: (data.incomplete || []).length,
};
fs.writeFileSync('${{ env.REPORT_DIR }}/axe-metrics.json', JSON.stringify(metrics, null, 2));
console.log('Axe metrics created');
" || echo "Axe metrics creation failed"
fi
- name: Run Pa11y accessibility scan
run: |
echo "Running Pa11y scan..."
TARGET_URL="${{ needs.setup.outputs.target-url }}"
# Run pa11y with multiple reporters
pa11y --config pa11y.json --reporter json --reporter html --reporter csv "$TARGET_URL" \
> ${{ env.REPORT_DIR }}/pa11y-report.json \
2> ${{ env.REPORT_DIR }}/pa11y-report.html || echo "Pa11y scan completed with issues"
# Ensure valid JSON even if pa11y fails
if [ ! -f "${{ env.REPORT_DIR }}/pa11y-report.json" ] || [ ! -s "${{ env.REPORT_DIR }}/pa11y-report.json" ]; then
echo "[]" > ${{ env.REPORT_DIR }}/pa11y-report.json
fi
- name: Run Lighthouse accessibility audit
run: |
echo "Running Lighthouse accessibility audit..."
TARGET_URL="${{ needs.setup.outputs.target-url }}"
# Run lighthouse for desktop with proper emulation settings
lighthouse "$TARGET_URL" \
--only-categories=accessibility \
--output=json \
--output-path=${{ env.REPORT_DIR }}/lighthouse-accessibility-desktop.json \
--chrome-flags="--headless --no-sandbox --disable-dev-shm-usage" \
--form-factor=desktop \
--screenEmulation.disabled=true \
--screenEmulation.mobile=false || echo "Lighthouse desktop scan completed with issues"
# Run lighthouse for mobile with explicit mobile emulation
lighthouse "$TARGET_URL" \
--only-categories=accessibility \
--output=json \
--output-path=${{ env.REPORT_DIR }}/lighthouse-accessibility-mobile.json \
--chrome-flags="--headless --no-sandbox --disable-dev-shm-usage" \
--form-factor=mobile \
--screenEmulation.mobile=true || echo "Lighthouse mobile scan completed with issues"
- name: Run Playwright accessibility tests
id: playwright_tests
continue-on-error: true
run: |
echo "Running Playwright accessibility tests..."
# Create Playwright output directories for artifacts (traces, screenshots, etc.)
mkdir -p ${{ env.REPORT_DIR }}/playwright-results
# Run Playwright tests with DEBUG enabled.
echo "Executing Playwright with TARGET_URL: ${{ needs.setup.outputs.target-url }} and DEBUG=pw:api"
TARGET_URL="${{ needs.setup.outputs.target-url }}" DEBUG=pw:api npx playwright test tests/accessibility/accessibility.spec.js --project=chromium --output-dir=${{ env.REPORT_DIR }}/playwright-results
# Define source and target for the report
# The playwright.config.js specifies 'playwright-report/results.json' as the output for the JSON reporter.
PLAYWRIGHT_JSON_REPORT_SOURCE="playwright-report/results.json"
PLAYWRIGHT_JSON_REPORT_TARGET="${{ env.REPORT_DIR }}/playwright-report.json"
# Ensure the target directory for the report exists (it should, as REPORT_DIR is created earlier)
mkdir -p "$(dirname "$PLAYWRIGHT_JSON_REPORT_TARGET")"
# Move the JSON report from the default location to the consolidated report directory
if [ -f "$PLAYWRIGHT_JSON_REPORT_SOURCE" ]; then
mv "$PLAYWRIGHT_JSON_REPORT_SOURCE" "$PLAYWRIGHT_JSON_REPORT_TARGET"
echo "Moved Playwright report from $PLAYWRIGHT_JSON_REPORT_SOURCE to $PLAYWRIGHT_JSON_REPORT_TARGET"
# Fallback to the old locations, just in case, with a warning.
elif [ -f "test-results/results.json" ]; then
echo "Warning: Playwright report found at 'test-results/results.json' (expected '$PLAYWRIGHT_JSON_REPORT_SOURCE'). Moving it."
mv "test-results/results.json" "$PLAYWRIGHT_JSON_REPORT_TARGET"
elif [ -f "results.json" ]; then
echo "Warning: Playwright report found at 'results.json' (expected '$PLAYWRIGHT_JSON_REPORT_SOURCE'). Moving it."
mv "results.json" "$PLAYWRIGHT_JSON_REPORT_TARGET"
else
echo "Playwright JSON report not found at '$PLAYWRIGHT_JSON_REPORT_SOURCE' or common fallbacks. Creating an empty report at '$PLAYWRIGHT_JSON_REPORT_TARGET'."
echo "[]" > "$PLAYWRIGHT_JSON_REPORT_TARGET"
fi
- name: Create enhanced keyboard navigation test
run: |
echo "Creating enhanced keyboard navigation test..."
cat > enhanced-keyboard-test.mjs << 'EOF'
import { chromium } from 'playwright';
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
async function testKeyboardNavigation() {
const browser = await chromium.launch({ headless: true });
const page = await browser.newPage();
const issues = [];
try {
const targetUrl = process.env.TARGET_URL || 'https://ncaa-d1-softball.netlify.app/';
console.log(`Testing keyboard navigation on: ${targetUrl}`);
await page.goto(targetUrl, { waitUntil: 'networkidle', timeout: 30000 });
// Test focusable elements
const focusableElements = await page.$$eval('button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])', elements => {
return elements.map(el => ({
tagName: el.tagName,
id: el.id,
className: el.className,
tabIndex: el.tabIndex,
textContent: el.textContent ? el.textContent.trim().substring(0, 50) : '',
isVisible: el.offsetParent !== null
}));
});
console.log(`Found ${focusableElements.length} focusable elements`);
if (focusableElements.length === 0) {
issues.push({
type: 'keyboard-navigation',
severity: 'critical',
message: 'No focusable elements found - keyboard users cannot interact with the page',
element: null
});
}
// Test focus indicators
const visibleElements = focusableElements.filter(el => el.isVisible);
const elementsToTest = Math.min(visibleElements.length, 15);
for (let i = 0; i < elementsToTest; i++) {
await page.keyboard.press('Tab');
const activeElement = await page.evaluate(() => {
const el = document.activeElement;
if (!el || el === document.body) return null;
const computedStyle = window.getComputedStyle(el);
const focusStyle = window.getComputedStyle(el, ':focus');
const hasOutline = (computedStyle.outline !== 'none' && computedStyle.outlineWidth !== '0px') ||
(focusStyle.outline !== 'none' && focusStyle.outlineWidth !== '0px');
const hasBoxShadow = computedStyle.boxShadow !== 'none' || focusStyle.boxShadow !== 'none';
const hasBorder = computedStyle.borderWidth !== '0px' || focusStyle.borderWidth !== '0px';
return {
tagName: el.tagName,
id: el.id,
className: el.className,
textContent: el.textContent ? el.textContent.trim().substring(0, 30) : '',
hasVisibleFocus: hasOutline || hasBoxShadow || hasBorder
};
});
if (activeElement && !activeElement.hasVisibleFocus) {
issues.push({
type: 'keyboard-navigation',
severity: 'moderate',
message: `Element ${activeElement.tagName} lacks visible focus indicator`,
element: activeElement
});
}
}
// Test skip links
const skipLinks = await page.$$eval('a[href^="#"]', links => {
return links.filter(link => {
const text = link.textContent.toLowerCase();
return text.includes('skip') || text.includes('main');
}).length;
});
if (skipLinks === 0) {
issues.push({
type: 'keyboard-navigation',
severity: 'serious',
message: 'No skip links found - keyboard users cannot bypass navigation',
element: null
});
}
} catch (error) {
console.error('Keyboard navigation test error:', error);
issues.push({
type: 'keyboard-navigation',
severity: 'critical',
message: `Test failed: ${error.message}`,
element: null
});
}
await browser.close();
// Ensure directory exists
const reportDir = process.env.REPORT_DIR || 'accessibility-reports';
if (!fs.existsSync(reportDir)) {
fs.mkdirSync(reportDir, { recursive: true });
}
fs.writeFileSync(path.join(reportDir, 'keyboard-navigation.json'), JSON.stringify(issues, null, 2));
console.log(`Keyboard navigation test completed. Found ${issues.length} issues.`);
return issues.length;
}
testKeyboardNavigation().catch(err => {
console.error('Test error:', err);
process.exit(1);
});
EOF
# Run the enhanced test with ES modules
TARGET_URL="${{ needs.setup.outputs.target-url }}" REPORT_DIR="${{ env.REPORT_DIR }}" node enhanced-keyboard-test.mjs || echo "Enhanced keyboard test completed with issues"
- name: Create enhanced screen reader test
run: |
echo "Creating enhanced screen reader test..."
cat > enhanced-screenreader-test.mjs << 'EOF'
import { chromium } from 'playwright';
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
async function testScreenReaderCompatibility() {
const browser = await chromium.launch({ headless: true });
const page = await browser.newPage();
const issues = [];
try {
const targetUrl = process.env.TARGET_URL || 'https://ncaa-d1-softball.netlify.app/';
console.log(`Testing screen reader compatibility on: ${targetUrl}`);
await page.goto(targetUrl, { waitUntil: 'networkidle', timeout: 30000 });
// Test page structure
const pageStructure = await page.evaluate(() => {
const hasMain = document.querySelector('main, [role="main"]') !== null;
const hasH1 = document.querySelector('h1') !== null;
const hasLang = document.documentElement.hasAttribute('lang');
const hasTitle = document.title && document.title.trim() !== '';
const imagesWithoutAlt = document.querySelectorAll('img:not([alt])').length;
const headings = Array.from(document.querySelectorAll('h1, h2, h3, h4, h5, h6')).map(h => parseInt(h.tagName[1]));
const landmarks = document.querySelectorAll('[role="main"], [role="navigation"], [role="banner"], [role="contentinfo"], main, nav, header, footer').length;
return {
hasMain,
hasH1,
hasLang,
hasTitle,
imagesWithoutAlt,
headings,
landmarks
};
});
// Check for critical structural issues
if (!pageStructure.hasMain) {
issues.push({
type: 'screen-reader',
severity: 'critical',
message: 'Page lacks main landmark - screen readers cannot identify main content',
element: { type: 'main-landmark' }
});
}
if (!pageStructure.hasH1) {
issues.push({
type: 'screen-reader',
severity: 'critical',
message: 'Page lacks h1 heading - screen readers cannot identify page topic',
element: { type: 'h1-heading' }
});
}
if (!pageStructure.hasLang) {
issues.push({
type: 'screen-reader',
severity: 'serious',
message: 'HTML element missing lang attribute',
element: { type: 'lang-attribute' }
});
}
if (!pageStructure.hasTitle) {
issues.push({
type: 'screen-reader',
severity: 'serious',
message: 'Page missing or empty title',
element: { type: 'page-title' }
});
}
if (pageStructure.imagesWithoutAlt > 0) {
issues.push({
type: 'screen-reader',
severity: 'serious',
message: `${pageStructure.imagesWithoutAlt} images missing alt text`,
count: pageStructure.imagesWithoutAlt
});
}
if (pageStructure.landmarks === 0) {
issues.push({
type: 'screen-reader',
severity: 'serious',
message: 'No ARIA landmarks found for navigation',
element: { type: 'landmarks' }
});
}
// Check heading hierarchy
if (pageStructure.headings.length > 0) {
let previousLevel = 0;
for (const level of pageStructure.headings) {
if (level > previousLevel + 1) {
issues.push({
type: 'screen-reader',
severity: 'moderate',
message: `Heading hierarchy skip detected (h${previousLevel} to h${level})`,
element: { type: 'heading-hierarchy' }
});
break;
}
previousLevel = level;
}
}
// Test form labels
const unlabeledInputs = await page.evaluate(() => {
const inputs = Array.from(document.querySelectorAll('input, select, textarea'));
return inputs.filter(input => {
const id = input.id;
const hasLabel = id && document.querySelector(`label[for="${id}"]`);
const ariaLabel = input.getAttribute('aria-label');
const ariaLabelledby = input.getAttribute('aria-labelledby');
return !hasLabel && !ariaLabel && !ariaLabelledby;
}).length;
});
if (unlabeledInputs > 0) {
issues.push({
type: 'screen-reader',
severity: 'serious',
message: `${unlabeledInputs} form inputs missing accessible labels`,
count: unlabeledInputs
});
}
} catch (error) {
console.error('Screen reader test error:', error);
issues.push({
type: 'screen-reader',
severity: 'critical',
message: `Test failed: ${error.message}`,
element: null
});
}
await browser.close();
// Ensure directory exists
const reportDir = process.env.REPORT_DIR || 'accessibility-reports';
if (!fs.existsSync(reportDir)) {
fs.mkdirSync(reportDir, { recursive: true });
}
fs.writeFileSync(path.join(reportDir, 'screen-reader.json'), JSON.stringify(issues, null, 2));
console.log(`Screen reader test completed. Found ${issues.length} issues.`);
return issues.length;
}
testScreenReaderCompatibility().catch(err => {
console.error('Test error:', err);
process.exit(1);
});
EOF
# Run the enhanced test with ES modules
TARGET_URL="${{ needs.setup.outputs.target-url }}" REPORT_DIR="${{ env.REPORT_DIR }}" node enhanced-screenreader-test.mjs || echo "Enhanced screen reader test completed with issues"
- name: Install required dependencies
run: |
echo "Installing required system dependencies..."
sudo apt-get update -qq
sudo apt-get install -y bc jq
- name: Generate comprehensive accessibility report
id: quality_gate_check # Added id
run: |
echo "Generating comprehensive accessibility report..."
# Helper function to safely extract numeric values from JSON
safe_jq_number() {
local file="$1"
local query="$2"
local default="${3:-0}"
if [ -f "$file" ] && [ -s "$file" ]; then
local result=$(jq -r "$query" "$file" 2>/dev/null)
# Check if result is a valid number (integer or float)
if [[ "$result" =~ ^[0-9]+(\\.[0-9]+)?$ ]]; then
echo "$result"
elif [[ "$result" == "null" || -z "$result" ]]; then
echo "$default" # Handle null or empty string from jq
else
echo "$default" # Default if not a number (e.g. error message from jq)
fi
else
echo "$default" # Default if file not found or empty
fi
}
# Calculate issue counts and scores with safe extraction
AXCORE_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/axe-report.json" 'if type=="array" and (.[0].violations | length > 0) then .[0].violations | length else if type=="object" and (.violations | length > 0) then .violations | length else 0 end end' 0)
AXCORE_CRITICAL_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/axe-metrics.json" '.byImpact.critical' 0)
PA11Y_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/pa11y-report.json" 'length' 0)
# Pa11y does not easily provide a "critical" count, so we'll use N/A or 0.
PA11Y_CRITICAL_ISSUES="N/A"
KEYBOARD_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/keyboard-navigation.json" 'length' 0)
KEYBOARD_CRITICAL_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/keyboard-navigation.json" '[.[] | select(.severity=="critical")] | length' 0)
SCREENREADER_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/screen-reader.json" 'length' 0)
SCREENREADER_CRITICAL_ISSUES=$(safe_jq_number "${{ env.REPORT_DIR }}/screen-reader.json" '[.[] | select(.severity=="critical")] | length' 0)
LIGHTHOUSE_DESKTOP_SCORE_RAW=$(safe_jq_number "${{ env.REPORT_DIR }}/lighthouse-accessibility-desktop.json" '.categories.accessibility.score * 100' 0)
LIGHTHOUSE_MOBILE_SCORE_RAW=$(safe_jq_number "${{ env.REPORT_DIR }}/lighthouse-accessibility-mobile.json" '.categories.accessibility.score * 100' 0)
# Ensure scores are integers for bash comparisons
LIGHTHOUSE_DESKTOP_SCORE=$(printf "%.0f" "$LIGHTHOUSE_DESKTOP_SCORE_RAW")
LIGHTHOUSE_MOBILE_SCORE=$(printf "%.0f" "$LIGHTHOUSE_MOBILE_SCORE_RAW")
TOTAL_ISSUES=$((AXCORE_ISSUES + PA11Y_ISSUES + KEYBOARD_ISSUES + SCREENREADER_ISSUES))
TOTAL_CRITICAL_ISSUES_NUMERIC=0
if [[ "$AXCORE_CRITICAL_ISSUES" =~ ^[0-9]+$ ]]; then TOTAL_CRITICAL_ISSUES_NUMERIC=$((TOTAL_CRITICAL_ISSUES_NUMERIC + AXCORE_CRITICAL_ISSUES)); fi
if [[ "$KEYBOARD_CRITICAL_ISSUES" =~ ^[0-9]+$ ]]; then TOTAL_CRITICAL_ISSUES_NUMERIC=$((TOTAL_CRITICAL_ISSUES_NUMERIC + KEYBOARD_CRITICAL_ISSUES)); fi
if [[ "$SCREENREADER_CRITICAL_ISSUES" =~ ^[0-9]+$ ]]; then TOTAL_CRITICAL_ISSUES_NUMERIC=$((TOTAL_CRITICAL_ISSUES_NUMERIC + SCREENREADER_CRITICAL_ISSUES)); fi
LIGHTHOUSE_AVERAGE="0"
if [ "$LIGHTHOUSE_DESKTOP_SCORE" -ne 0 ] || [ "$LIGHTHOUSE_MOBILE_SCORE" -ne 0 ]; then
LIGHTHOUSE_AVERAGE=$(echo "scale=1; ($LIGHTHOUSE_DESKTOP_SCORE + $LIGHTHOUSE_MOBILE_SCORE) / 2" | bc -l 2>/dev/null || printf "%.0f" $(((LIGHTHOUSE_DESKTOP_SCORE + LIGHTHOUSE_MOBILE_SCORE) / 2)))
fi
echo "=== Calculated Metrics ==="
echo "Axe-core Issues: $AXCORE_ISSUES (Critical: $AXCORE_CRITICAL_ISSUES)"
echo "Pa11y Issues: $PA11Y_ISSUES"
echo "Keyboard Issues: $KEYBOARD_ISSUES (Critical: $KEYBOARD_CRITICAL_ISSUES)"
echo "Screen Reader Issues: $SCREENREADER_ISSUES (Critical: $SCREENREADER_CRITICAL_ISSUES)"
echo "Total Issues: $TOTAL_ISSUES (Critical: $TOTAL_CRITICAL_ISSUES_NUMERIC)"
echo "Lighthouse Desktop Score: $LIGHTHOUSE_DESKTOP_SCORE%"
echo "Lighthouse Mobile Score: $LIGHTHOUSE_MOBILE_SCORE%"
echo "Lighthouse Average: $LIGHTHOUSE_AVERAGE%"
# Create executive summary JSON (existing logic seems mostly fine, ensure variables are correct)
cat > ${{ env.REPORT_DIR }}/executive-summary.json << EOF
{
"scan_metadata": {
"target_url": "${{ needs.setup.outputs.target-url }}",
"standards": "${{ needs.setup.outputs.standards }}",
"environment": "${{ needs.setup.outputs.environment }}",
"scan_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"workflow_run_id": "${{ github.run_id }}",
"workflow_run_number": "${{ github.run_number }}"
},
"results_summary": {
"total_issues": $TOTAL_ISSUES,
"critical_violations": $AXCORE_CRITICAL_ISSUES, # This was specific to Axe in original, adjust if needed for overall critical
"axe_issues": $AXCORE_ISSUES,
"pa11y_issues": $PA11Y_ISSUES,
"keyboard_issues": $KEYBOARD_ISSUES,
"screenreader_issues": $SCREENREADER_ISSUES,
"lighthouse_scores": {
"desktop": $LIGHTHOUSE_DESKTOP_SCORE,
"mobile": $LIGHTHOUSE_MOBILE_SCORE,
"average": $LIGHTHOUSE_AVERAGE
}
},
"compliance_status": {
"wcag_2_1_aa": $([ "$AXCORE_ISSUES" -eq 0 ] && echo "true" || echo "false"),
"section_508": $([ "$AXCORE_ISSUES" -eq 0 ] && echo "true" || echo "false"), # Assuming Axe issues map to this
"ada_compliant": $([ "$TOTAL_ISSUES" -eq 0 ] && echo "true" || echo "false"),
"overall_status": "$([ "$TOTAL_ISSUES" -eq 0 ] && echo "passed" || echo "failed")"
}
}
EOF
# Create a simple HTML dashboard report
cat > ${{ env.REPORT_DIR }}/accessibility-dashboard.html << 'EOF'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Accessibility Scan Results</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
.summary { background: #f5f5f5; padding: 15px; border-radius: 5px; margin-bottom: 20px; }
.pass { color: green; } .fail { color: red; } .warn { color: orange; }
table { border-collapse: collapse; width: 100%; }
th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
th { background-color: #f2f2f2; }
</style>
</head>
<body>
<h1>🔍 Accessibility Scan Results</h1>
<div class="summary">
<h2>Summary</h2>
<p><strong>Total Issues:</strong> <span class="$([ $TOTAL_ISSUES -eq 0 ] && echo 'pass' || echo 'fail')">$TOTAL_ISSUES</span></p>
<p><strong>Scan Date:</strong> $(date)</p>
<p><strong>Target URL:</strong> ${{ needs.setup.outputs.target-url }}</p>
</div>
<h2>📊 Tool Results</h2>
<table>
<tr><th>Tool</th><th>Issues Found</th><th>Status</th></tr>
<tr><td>Axe-core</td><td>$AXCORE_ISSUES</td><td class="$([ $AXCORE_ISSUES -eq 0 ] && echo 'pass' || echo 'fail')">$([ $AXCORE_ISSUES -eq 0 ] && echo 'PASS' || echo 'FAIL')</td></tr>
<tr><td>Pa11y</td><td>$PA11Y_ISSUES</td><td class="$([ $PA11Y_ISSUES -eq 0 ] && echo 'pass' || echo 'fail')">$([ $PA11Y_ISSUES -eq 0 ] && echo 'PASS' || echo 'FAIL')</td></tr>
<tr><td>Keyboard Navigation</td><td>$KEYBOARD_ISSUES</td><td class="$([ $KEYBOARD_ISSUES -eq 0 ] && echo 'pass' || echo 'fail')">$([ $KEYBOARD_ISSUES -eq 0 ] && echo 'PASS' || echo 'FAIL')</td></tr>
<tr><td>Screen Reader</td><td>$SCREENREADER_ISSUES</td><td class="$([ $SCREENREADER_ISSUES -eq 0 ] && echo 'pass' || echo 'fail')">$([ $SCREENREADER_ISSUES -eq 0 ] && echo 'PASS' || echo 'FAIL')</td></tr>
<tr><td>Lighthouse Desktop</td><td>Score: $LIGHTHOUSE_DESKTOP_SCORE%</td><td class="$([ $LIGHTHOUSE_DESKTOP_SCORE -ge 90 ] && echo 'pass' || echo 'warn')">$([ $LIGHTHOUSE_DESKTOP_SCORE -ge 90 ] && echo 'GOOD' || echo 'NEEDS WORK')</td></tr>
<tr><td>Lighthouse Mobile</td><td>Score: $LIGHTHOUSE_MOBILE_SCORE%</td><td class="$([ $LIGHTHOUSE_MOBILE_SCORE -ge 90 ] && echo 'pass' || echo 'warn')">$([ $LIGHTHOUSE_MOBILE_SCORE -ge 90 ] && echo 'GOOD' || echo 'NEEDS WORK')</td></tr>
</table>
<h2>📋 Report Files</h2>
<ul>
<li><a href="axe-report.json">Axe-core Report</a></li>
<li><a href="pa11y-report.json">Pa11y Report</a></li>
<li><a href="lighthouse-accessibility-desktop.json">Lighthouse Desktop</a></li>
<li><a href="lighthouse-accessibility-mobile.json">Lighthouse Mobile</a></li>
<li><a href="executive-summary.json">Executive Summary</a></li>
</ul>
</body>
</html>
EOF
# GitHub Step Summary Generation
echo "# 🔍 Unified Accessibility Scan Results" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Target URL:** ${{ needs.setup.outputs.target-url }}" >> $GITHUB_STEP_SUMMARY
echo "**Standards:** ${{ needs.setup.outputs.standards }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## 📊 Overall Scan Summary" >> $GITHUB_STEP_SUMMARY
echo "| Tool | Total Issues | Critical Issues | Notes |" >> $GITHUB_STEP_SUMMARY
echo "|--------------------------|--------------|-----------------|----------------------------------------------------|" >> $GITHUB_STEP_SUMMARY
LIGHTHOUSE_DESKTOP_STATUS_ICON=$([ "$LIGHTHOUSE_DESKTOP_SCORE" -lt 90 ] && echo "⚠️" || echo "✅")
LIGHTHOUSE_DESKTOP_DISPLAY="Score: $LIGHTHOUSE_DESKTOP_SCORE% $LIGHTHOUSE_DESKTOP_STATUS_ICON"
echo "| Lighthouse (Desktop) | $LIGHTHOUSE_DESKTOP_DISPLAY | N/A | Automated audit for performance, accessibility, etc. |" >> $GITHUB_STEP_SUMMARY
LIGHTHOUSE_MOBILE_STATUS_ICON=$([ "$LIGHTHOUSE_MOBILE_SCORE" -lt 90 ] && echo "⚠️" || echo "✅")
LIGHTHOUSE_MOBILE_DISPLAY="Score: $LIGHTHOUSE_MOBILE_SCORE% $LIGHTHOUSE_MOBILE_STATUS_ICON"
echo "| Lighthouse (Mobile) | $LIGHTHOUSE_MOBILE_DISPLAY | N/A | Automated audit for performance, accessibility, etc. |" >> $GITHUB_STEP_SUMMARY
echo "| Axe-core (WCAG) | $AXCORE_ISSUES | $AXCORE_CRITICAL_ISSUES | Automated WCAG compliance checks. |" >> $GITHUB_STEP_SUMMARY
echo "| Pa11y | $PA11Y_ISSUES | $PA11Y_CRITICAL_ISSUES | Additional automated checks. |" >> $GITHUB_STEP_SUMMARY
echo "| Keyboard Navigation Test | $KEYBOARD_ISSUES | $KEYBOARD_CRITICAL_ISSUES | Custom script for keyboard interaction. |" >> $GITHUB_STEP_SUMMARY
echo "| Screen Reader Test | $SCREENREADER_ISSUES | $SCREENREADER_CRITICAL_ISSUES | Custom script for screen reader compatibility. |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## 🚦 Quality Gate Status" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
FAIL_ON_ISSUES_FLAG="${{ needs.setup.outputs.fail-on-issues }}"
# Define a minimum score for Lighthouse, e.g., 90
MIN_LIGHTHOUSE_SCORE=90
QUALITY_GATE_PASSED="true" # Assume pass initially
GATE_FAIL_REASONS=""
if [ "$TOTAL_ISSUES" -gt 0 ]; then
QUALITY_GATE_PASSED="false"
GATE_FAIL_REASONS="<li>Total issues found: $TOTAL_ISSUES</li>"
fi
if [ "$LIGHTHOUSE_AVERAGE" != "N/A" ] && [ $(echo "$LIGHTHOUSE_AVERAGE < $MIN_LIGHTHOUSE_SCORE" | bc -l) -eq 1 ]; then
QUALITY_GATE_PASSED="false"
GATE_FAIL_REASONS="${GATE_FAIL_REASONS}<li>Average Lighthouse score ($LIGHTHOUSE_AVERAGE%) is below threshold ($MIN_LIGHTHOUSE_SCORE%)</li>"
fi
if [ "$QUALITY_GATE_PASSED" = "true" ]; then
echo "✅ Quality Gate: Passed" >> $GITHUB_STEP_SUMMARY
else
echo "❌ Quality Gate: Failed" >> $GITHUB_STEP_SUMMARY
echo "Reasons for failure:" >> $GITHUB_STEP_SUMMARY
echo "<ul>" >> $GITHUB_STEP_SUMMARY
echo "$GATE_FAIL_REASONS" >> $GITHUB_STEP_SUMMARY
echo "</ul>" >> $GITHUB_STEP_SUMMARY
if [ "$FAIL_ON_ISSUES_FLAG" = "true" ]; then
echo "The workflow is configured to fail on quality gate violations." >> $GITHUB_STEP_SUMMARY
exit 1 # Fail the step
else
echo "The workflow is NOT configured to fail on quality gate violations, so it will proceed." >> $GITHUB_STEP_SUMMARY
fi
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "## 🛠️ How to Fix Issues & Next Steps" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Review the generated reports in the artifacts for detailed information on each issue." >> $GITHUB_STEP_SUMMARY
echo "Key reports include:" >> $GITHUB_STEP_SUMMARY
echo "- \`axe-report.json\` / \`axe-metrics.json\`" >> $GITHUB_STEP_SUMMARY
echo "- \`pa11y-report.json\` / \`pa11y-report.html\`" >> $GITHUB_STEP_SUMMARY
echo "- \`lighthouse-accessibility-desktop.json\` / \`lighthouse-accessibility-mobile.json\`" >> $GITHUB_STEP_SUMMARY
echo "- \`playwright-report.json\` (and other Playwright artifacts if tests failed)" >> $GITHUB_STEP_SUMMARY
echo "- \`keyboard-navigation.json\`" >> $GITHUB_STEP_SUMMARY
echo "- \`screen-reader.json\`" >> $GITHUB_STEP_SUMMARY
echo "- \`executive-summary.json\`" >> $GITHUB_STEP_SUMMARY
echo "- \`accessibility-dashboard.html\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "For guidance on fixing common accessibility issues, refer to WCAG documentation and resources like Deque University, WebAIM, and MDN." >> $GITHUB_STEP_SUMMARY
# Set outputs for the quality gate check
echo "passed=${QUALITY_GATE_PASSED}" >> $GITHUB_OUTPUT
echo "total-issues=${TOTAL_ISSUES}" >> $GITHUB_OUTPUT
echo "critical-issues=${TOTAL_CRITICAL_ISSUES_NUMERIC}" >> $GITHUB_OUTPUT
- name: Upload accessibility reports
uses: actions/upload-artifact@v4
if: always()
with:
name: unified-accessibility-evaluation
path: ${{ env.REPORT_DIR }}
retention-days: 30
- name: Fail workflow if quality gates failed and configured to fail
if: needs.setup.outputs.fail-on-issues == 'true' && steps.quality_gate_check.outputs.passed == 'false' # Changed steps.quality-gates to steps.quality_gate_check
run: |
echo "❌ Accessibility quality gates failed and fail_on_issues is enabled"
echo "Total issues found: ${{ steps.quality_gate_check.outputs.total-issues }}" # Changed steps.quality-gates to steps.quality_gate_check
echo "Critical issues: ${{ steps.quality_gate_check.outputs.critical-issues }}" # Changed steps.quality-gates to steps.quality_gate_check
echo "Workflow will exit with failure status to enforce accessibility standards."
exit 1
# Job 3: PR Comment (only for pull requests)
pr-comment:
name: 'Update PR Comment'
runs-on: ubuntu-latest
needs: [setup, accessibility-scan]
if: github.event_name == 'pull_request'
env:
REPORT_DIR: 'accessibility-reports'
permissions:
pull-requests: write
steps:
- name: Download reports
uses: actions/download-artifact@v4
with:
name: unified-accessibility-evaluation
path: ${{ env.REPORT_DIR }}
- name: Install required dependencies
run: |
echo "Installing required dependencies for PR comment generation..."
sudo apt-get update -qq
sudo apt-get install -y bc jq
- name: Generate PR comment
run: |
# Calculate results
AXCORE_ISSUES=$(jq 'if type=="array" then .[0].violations else .violations end | length' ${{ env.REPORT_DIR }}/axe-report.json 2>/dev/null || echo 0)
TOTAL_ISSUES=$(jq '.results_summary.total_issues' ${{ env.REPORT_DIR }}/executive-summary.json 2>/dev/null || echo 0)
LIGHTHOUSE_SCORE=$(jq '.results_summary.lighthouse_scores.average' ${{ env.REPORT_DIR }}/executive-summary.json 2>/dev/null || echo 0)
# Create PR comment
cat > pr-comment.md << EOF
# 🔍 Accessibility Scan Results
**Scan completed for:** ${{ needs.setup.outputs.target-url }}
**Standards:** ${{ needs.setup.outputs.standards }}
**Workflow Run:** [${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
## 📊 Results Summary
| Metric | Value | Status |
|--------|-------|---------|
| **Total Issues** | $TOTAL_ISSUES | $([ "$TOTAL_ISSUES" -eq "0" ] && echo "✅ PASS" || echo "❌ FAIL") |
| **WCAG Violations** | $AXCORE_ISSUES | $([ "$AXCORE_ISSUES" -eq "0" ] && echo "✅ PASS" || echo "❌ FAIL") |
| **Lighthouse Score** | ${LIGHTHOUSE_SCORE}% | $([ "$(echo "$LIGHTHOUSE_SCORE >= 90" | bc -l 2>/dev/null || echo 0)" -eq "1" ] && echo "✅ GOOD" || echo "⚠️ NEEDS WORK") |
## 🎯 Compliance Status
- **WCAG 2.1 AA:** $([ "$AXCORE_ISSUES" -eq "0" ] && echo "✅ COMPLIANT" || echo "❌ NON-COMPLIANT")
- **Section 508:** $([ "$AXCORE_ISSUES" -eq "0" ] && echo "✅ COMPLIANT" || echo "❌ NON-COMPLIANT")
- **ADA:** $([ "$TOTAL_ISSUES" -eq "0" ] && echo "✅ COMPLIANT" || echo "❌ NON-COMPLIANT")
- **WCAG 2.0 AA:** $([ "$AXCORE_ISSUES" -eq "0" ] && echo "✅ COMPLIANT" || echo "❌ NON-COMPLIANT")
$(if [ "$TOTAL_ISSUES" -eq "0" ]; then
echo "## 🎉 Congratulations!"
echo ""
echo "Your changes pass all accessibility tests! The application is ready for users with disabilities."
else
echo "## ⚠️ Action Required"
echo ""
echo "Please address the accessibility issues found before merging. Download the detailed reports from the workflow artifacts."
echo ""
echo "**Priority:** Fix WCAG violations first, then address keyboard and screen reader issues."
fi)
---
📋 **Detailed Reports:** Available in workflow artifacts • 🔄 **Re-scan:** Push new commits to trigger another scan
EOF
- name: Comment on PR
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const comment = fs.readFileSync('pr-comment.md', 'utf8');
// Find existing comment to update
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existingComment = comments.find(comment =>
comment.body.includes('🔍 Accessibility Scan Results')
);
if (existingComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: comment
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: comment
});
}