name: Orchestrate Benchmarks

on:
  pull_request:
    types: [labeled, synchronize, opened, reopened]

permissions:
  actions: write
  contents: write
  pull-requests: write
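
# Job pipeline: `guard` reads the PR labels and selects benchmarks from
# benchmarks/benchmarks-config.json; `benchmark` fans out over the selection
# through a matrix of reusable-workflow calls; `docs` rebuilds and deploys the
# documentation once benchmarks succeed; `notify-failure` / `notify-success`
# post (or update) a status comment on the PR; `workflow-summary` prints a
# final recap whatever the outcome.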
jobs:
  guard:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    outputs:
      benchmarks: ${{ steps.check.outputs.benchmarks }}
      run_any: ${{ steps.check.outputs.run_any }}
      benchmarks_summary: ${{ steps.check.outputs.benchmarks_summary }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
      - name: Check which benchmarks should run
        id: check
        run: |
          echo "🛡️ Guard job: Checking execution conditions..."
          # Load benchmarks configuration from JSON
          CONFIG_FILE="benchmarks/benchmarks-config.json"
          if [ ! -f "$CONFIG_FILE" ]; then
            echo "❌ Configuration file not found: $CONFIG_FILE"
            exit 1
          fi

          # Extract benchmark IDs from JSON
          BENCHMARK_IDS=$(jq -r '.benchmarks[].id' "$CONFIG_FILE")
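
          # Illustrative shape of benchmarks-config.json, inferred from the
          # jq queries here and the matrix fields used by the benchmark job
          # below (ids and values are hypothetical; the real file may carry
          # additional keys):
          #   { "benchmarks": [ { "id": "solvers-basic",
          #                       "julia_version": "1",
          #                       "julia_arch": "x64",
          #                       "runs_on": "ubuntu-latest",
          #                       "runner": "standard" } ] }
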
          EVENT_NAME="${{ github.event_name }}"
          echo "📋 Event type: $EVENT_NAME"
          SELECTED='[]'
          BENCHMARKS_LIST="none"  # default summary when nothing is selected
          if [[ "$EVENT_NAME" == "push" ]]; then
            # For push events to main, run no benchmarks (defensive: this
            # workflow currently triggers on pull_request only)
            echo "🚀 Push event to main branch detected"
            echo "✅ Running no benchmarks"
          elif [[ "$EVENT_NAME" == "pull_request" ]]; then
            # For PR events, check base branch and labels
            LABELS="${{ join(github.event.pull_request.labels.*.name, ' ') }}"
            BASE="${{ github.event.pull_request.base.ref }}"
            echo "🎯 Base branch: $BASE"
            echo "🏷️ PR labels: $LABELS"
            if [[ "$BASE" != "main" ]]; then
              echo "❌ Base branch is not 'main' - skipping benchmarks"
              echo "benchmarks=[]" >> $GITHUB_OUTPUT
              echo "run_any=false" >> $GITHUB_OUTPUT
              echo "benchmarks_summary=none" >> $GITHUB_OUTPUT
              exit 0
            fi
            echo "✅ Base branch check passed (main)"
            # Build JSON array of selected benchmarks
            FIRST=true

            # Check for group labels (pattern: "run bench {prefix}-all")
            # Extract all potential group prefixes from labels
            GROUP_PREFIXES=$(echo "$LABELS" | grep -oE "run bench [a-z0-9]+-all" | sed 's/run bench //g' | sed 's/-all//g')
            if [ -n "$GROUP_PREFIXES" ]; then
              # Process each group label found
              for prefix in $GROUP_PREFIXES; do
                echo "✅ Found 'run bench ${prefix}-all' label"
                # Add all benchmarks starting with this prefix
                for bench_id in $BENCHMARK_IDS; do
                  if [[ "$bench_id" == ${prefix}-* ]]; then
                    # Get full benchmark config from JSON
                    BENCH_CONFIG=$(jq -c ".benchmarks[] | select(.id == \"$bench_id\")" "$CONFIG_FILE")
                    if [ "$FIRST" = true ]; then
                      SELECTED="[$BENCH_CONFIG"
                      BENCHMARKS_LIST="$bench_id"
                      FIRST=false
                    else
                      SELECTED="$SELECTED,$BENCH_CONFIG"
                      BENCHMARKS_LIST="$BENCHMARKS_LIST, $bench_id"
                    fi
                  fi
                done
              done
            fi
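
            # Example with hypothetical ids: if the config defines
            # "solvers-basic" and "solvers-large", the single label
            # "run bench solvers-all" selects both, since each id starts
            # with the "solvers-" prefix.
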
            # Check for individual benchmark labels
            for bench_id in $BENCHMARK_IDS; do
              ESCAPED_BENCH_ID=$(printf '%s' "$bench_id" | sed 's/[][\\.^$*+?(){}|/-]/\\&/g')
              if echo "$LABELS" | grep -qE "(^| )run bench ${ESCAPED_BENCH_ID}( |$)"; then
                echo "✅ Found 'run bench $bench_id' label"
                # Get full benchmark config from JSON
                BENCH_CONFIG=$(jq -c ".benchmarks[] | select(.id == \"$bench_id\")" "$CONFIG_FILE")
                if [ "$FIRST" = true ]; then
                  SELECTED="[$BENCH_CONFIG"
                  BENCHMARKS_LIST="$bench_id"
                  FIRST=false
                else
                  SELECTED="$SELECTED,$BENCH_CONFIG"
                  BENCHMARKS_LIST="$BENCHMARKS_LIST, $bench_id"
                fi
              fi
            done
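
            # Caveat: an id matched by both its group label and its own
            # individual label is appended to SELECTED twice; the matrix
            # below would then likely spawn duplicate jobs. Deduplicate
            # here if that combination is expected in practice.
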
            # Close JSON array
            if [ "$FIRST" = true ]; then
              SELECTED='[]'
              BENCHMARKS_LIST="none"
              echo "❌ No benchmark labels found"
              # Extract unique prefixes from benchmark IDs for group label suggestions
              PREFIXES=$(echo "$BENCHMARK_IDS" | grep -oE '^[a-z0-9]+' | sort -u)
              GROUP_LABELS=$(echo "$PREFIXES" | sed 's/^/run bench /g' | sed 's/$/-all/g' | tr '\n' ',' | sed 's/,/, /g' | sed 's/, $//')
              echo "ℹ️ Expected labels:"
              echo "  • Individual: $(echo $BENCHMARK_IDS | tr '\n' ' ' | sed 's/ /, run bench /g' | sed 's/^/run bench /')"
              echo "  • Groups: $GROUP_LABELS"
            else
              SELECTED="$SELECTED]"
            fi
          else
            echo "❌ Unexpected event type: $EVENT_NAME"
          fi
          # Set outputs
          echo "benchmarks=$SELECTED" >> $GITHUB_OUTPUT
          if [ "$SELECTED" = "[]" ]; then
            echo "run_any=false" >> $GITHUB_OUTPUT
          else
            echo "run_any=true" >> $GITHUB_OUTPUT
          fi
          echo "benchmarks_summary=$BENCHMARKS_LIST" >> $GITHUB_OUTPUT
          echo ""
          echo "📊 Summary: Will run benchmarks for: $BENCHMARKS_LIST"
          echo "📋 Selected benchmarks JSON: $SELECTED"
  benchmark:
    needs: guard
    if: needs.guard.outputs.run_any == 'true'
    strategy:
      matrix:
        benchmark: ${{ fromJSON(needs.guard.outputs.benchmarks) }}
      fail-fast: false
    uses: ./.github/workflows/benchmark-reusable.yml
    with:
      script_path: benchmarks/${{ matrix.benchmark.id }}.jl
      julia_version: ${{ matrix.benchmark.julia_version }}
      julia_arch: ${{ matrix.benchmark.julia_arch }}
      runs_on: ${{ matrix.benchmark.runs_on }}
      runner: ${{ matrix.benchmark.runner }}
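
  # fail-fast is disabled above so one failing benchmark does not cancel
  # its siblings; `docs` below still waits for every matrix leg to finish
  # and only runs when none of them failed or were cancelled.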

  docs:
    needs: [guard, benchmark]
    if: |
      always() &&
      (needs.guard.result == 'success') &&
      (needs.benchmark.result != 'cancelled') &&
      (needs.benchmark.result != 'failure')
    runs-on: ubuntu-latest
    steps:
      # ---------------------------
      # Checkout repository
      # ---------------------------
      - name: Checkout with latest changes
        uses: actions/checkout@v6
        with:
          ref: ${{ github.head_ref || github.ref_name }}
          token: ${{ secrets.GITHUB_TOKEN }}
          fetch-depth: 0
      - name: Pull latest changes including benchmark results
        run: |
          echo "🔄 Pulling latest changes from branch..."
          git pull origin ${{ github.head_ref || github.ref_name }}
          echo "✅ Latest changes pulled"
      # ---------------------------
      # Setup Julia
      # ---------------------------
      - uses: julia-actions/setup-julia@latest
      # ---------------------------
      # Cache Julia packages and compiled artifacts
      # ---------------------------
      - name: Cache Julia packages for docs
        uses: actions/cache@v5
        with:
          path: |
            ~/.julia/packages
            ~/.julia/compiled
          key: julia-docs-${{ hashFiles('docs/Project.toml') }}
          restore-keys: julia-docs-
      # ---------------------------
      # Build Julia package for docs
      # ---------------------------
      - uses: julia-actions/julia-buildpkg@latest
        with:
          ignore-no-cache: true
      - name: Install dependencies for docs
        id: docs-deps
        env:
          JULIA_PKG_SERVER_REGISTRY_PREFERENCE: eager
          JULIA_PKG_PRECOMPILE_DELAY: 0
        run: |
          julia --project=docs/ --startup-file=no -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
        continue-on-error: true
      - name: Retry install dependencies for docs (if failed)
        if: steps.docs-deps.outcome == 'failure'
        env:
          JULIA_PKG_SERVER_REGISTRY_PREFERENCE: eager
          JULIA_PKG_PRECOMPILE_DELAY: 0
        run: |
          sleep 30
          julia --project=docs/ --startup-file=no -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
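      # Note on the retry pattern: with `continue-on-error: true` the job is
      # not marked failed when the install step fails, so `if: failure()`
      # would never trigger; gating the retry on the step's own
      # `steps.docs-deps.outcome` is what makes it fire.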
      # ---------------------------
      # Build and deploy documentation
      # ---------------------------
      - name: Build and deploy
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
          GKSwstype: 100
        run: |
          julia --project=docs/ -e 'ENV["GKSwstype"]="nul"; include("docs/make.jl")'

  notify-failure:
    needs: [guard, benchmark, docs]
    if: failure()
    runs-on: ubuntu-latest
    steps:
      - name: Comment on PR with failure notification
        uses: actions/github-script@v8
        env:
          BENCHMARK_RESULT: ${{ needs.benchmark.result }}
          DOCS_RESULT: ${{ needs.docs.result }}
        with:
          script: |
            console.log('🚨 Workflow failure detected - posting/updating comment...');
            const marker = '<!-- ctbenchmarks-status-comment -->';
            const prNumber = context.payload.pull_request.number;
            const failedJobs = [];
            const now = new Date();
            const utcMillis = now.getTime() + now.getTimezoneOffset() * 60000;
            const tzMillis = utcMillis - 2 * 60 * 60 * 1000; // UTC-2
            const tzDate = new Date(tzMillis);
            const pad = (value) => value.toString().padStart(2, '0');
            const timestamp = `${tzDate.getFullYear()}-${pad(tzDate.getMonth() + 1)}-${pad(tzDate.getDate())} ${pad(tzDate.getHours())}:${pad(tzDate.getMinutes())}:${pad(tzDate.getSeconds())} (UTC-2)`;
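            // Note: the offset arithmetic above makes the local date getters
            // read out UTC components, then shifts by two more hours, so the
            // timestamp renders as fixed UTC-2 regardless of the runner's
            // own timezone.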
            // Check benchmark job
            if (process.env.BENCHMARK_RESULT === 'failure') {
              failedJobs.push('Benchmarks');
            }
            // Check docs job
            if (process.env.DOCS_RESULT === 'failure') {
              failedJobs.push('Documentation');
            }
            const runUrl = `${context.payload.repository.html_url}/actions/runs/${context.runId}`;
            const comment = `${marker}
            ## ❌ Workflow Failed

            The benchmark and documentation workflow encountered failures:

            ### Failed Jobs
            ${failedJobs.map(job => `- ❌ ${job}`).join('\n')}

            ### 🔍 Troubleshooting
            - Check the [workflow run](${runUrl}) for detailed logs
            - Verify that all required dependencies are available
            - Ensure the benchmark code is functioning correctly

            ### 🔄 Next Steps
            - Fix any issues identified in the logs
            - Push new commits to retry, or
            - Remove and re-add the benchmark label to restart

            ---
            *🤖 This notification was automatically generated* ${timestamp}`;
            // Find existing failure comment
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              per_page: 100
            });
            const existing = comments.find(c => c.body && c.body.includes(marker));
            if (existing) {
              console.log(`✏️ Updating existing comment ${existing.id}`);
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existing.id,
                body: comment
              });
            } else {
              console.log('💬 Posting new comment to PR...');
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: prNumber,
                body: comment
              });
            }
            console.log('✅ Failure comment posted/updated successfully');

  notify-success:
    needs: [guard, benchmark, docs]
    if: |
      always() &&
      (needs.guard.result == 'success') &&
      (needs.docs.result == 'success') &&
      (needs.benchmark.result != 'cancelled') &&
      (needs.benchmark.result != 'failure')
    runs-on: ubuntu-latest
    steps:
      - name: Comment on PR with success notification
        uses: actions/github-script@v8
        env:
          BENCHMARKS_SUMMARY: ${{ needs.guard.outputs.benchmarks_summary }}
        with:
          script: |
            console.log('🎉 Workflow success detected - posting/updating comment...');
            const marker = '<!-- ctbenchmarks-status-comment -->';
            const prNumber = context.payload.pull_request.number;
            const previewUrl = `https://control-toolbox.org/CTBenchmarks.jl/previews/PR${prNumber}/index.html`;
            const benchmarksSummary = process.env.BENCHMARKS_SUMMARY;
            const runUrl = `${context.payload.repository.html_url}/actions/runs/${context.runId}`;
            const branchName = context.payload.pull_request.head.ref;
            const now = new Date();
            // Same fixed UTC-2 arithmetic as in notify-failure.
            const utcMillis = now.getTime() + now.getTimezoneOffset() * 60000;
            const tzMillis = utcMillis - 2 * 60 * 60 * 1000; // UTC-2
            const tzDate = new Date(tzMillis);
            const pad = (value) => value.toString().padStart(2, '0');
            const timestamp = `${tzDate.getFullYear()}-${pad(tzDate.getMonth() + 1)}-${pad(tzDate.getDate())} ${pad(tzDate.getHours())}:${pad(tzDate.getMinutes())}:${pad(tzDate.getSeconds())} (UTC-2)`;
            const previewSection = `
            ### 📖 Documentation Preview

            - 🌐 **[📚 View Documentation Preview](${previewUrl})** ← Click to see your changes!
            `;
            const comment = `${marker}
            ## ✅ Benchmark and Documentation Complete

            The automated workflow has completed successfully! 🎉

            ### ✅ Completed Tasks
            - 📊 **Benchmarks**: ${benchmarksSummary} executed and results saved to your branch
            - 📚 **Documentation**: Documentation updated successfully
            - 🔄 **Integration**: All changes integrated properly
            ${previewSection}
            ### 📋 Results
            - 🎯 Benchmark results have been committed to your feature branch
            - 📚 Documentation has been regenerated with the latest benchmark data

            ### 🔗 Links
            - 📊 [View workflow run](${runUrl})
            - 🌿 [View your feature branch](${context.payload.repository.html_url}/tree/${branchName})

            ---
            *🤖 This notification was automatically generated* ${timestamp}`;
            // Find existing success comment
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              per_page: 100
            });
            const existing = comments.find(c => c.body && c.body.includes(marker));
            if (existing) {
              console.log(`✏️ Updating existing comment ${existing.id}`);
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existing.id,
                body: comment
              });
            } else {
              console.log('💬 Posting new comment to PR...');
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: prNumber,
                body: comment
              });
            }
            console.log('✅ Success comment posted/updated successfully');

  workflow-summary:
    needs: [guard, benchmark, docs]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: 📊 Final workflow summary
        env:
          BENCHMARK_RESULT: ${{ needs.benchmark.result }}
          DOCS_RESULT: ${{ needs.docs.result }}
        run: |
          echo "📋 ==================================="
          echo "📊 WORKFLOW EXECUTION SUMMARY"
          echo "📋 ==================================="
          echo ""
          echo "🛡️ Guard Job: ✅ PASSED"
          echo "  └─ Benchmarks planned: ${{ needs.guard.outputs.benchmarks_summary }}"
          echo ""
          # Display benchmark results
          if [ "$BENCHMARK_RESULT" == "success" ]; then
            echo "📊 Benchmarks: ✅ SUCCESS"
          elif [ "$BENCHMARK_RESULT" == "failure" ]; then
            echo "📊 Benchmarks: ❌ FAILED"
          elif [ "$BENCHMARK_RESULT" == "skipped" ]; then
            echo "📊 Benchmarks: ⏭️ SKIPPED"
          fi
          # Display docs result
          if [ "$DOCS_RESULT" == "success" ]; then
            echo "📚 Documentation: ✅ SUCCESS"
            echo "  └─ Docs updated successfully"
          elif [ "$DOCS_RESULT" == "failure" ]; then
            echo "📚 Documentation: ❌ FAILED"
            echo "  └─ Check logs for details"
          else
            echo "📚 Documentation: ⏭️ SKIPPED"
          fi
          echo ""
          echo "🔗 Workflow URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          echo ""
          # Check overall status
          overall_status="✅ SUCCESS"
          if [ "$BENCHMARK_RESULT" == "failure" ] || [ "$DOCS_RESULT" == "failure" ]; then
            overall_status="❌ FAILED"
          fi
          echo "🎯 Overall Status: $overall_status"
          echo "📋 ==================================="