Skip to content

Fix GlobalDrcForceImproveSolver Debugging Blind Spots With Fixed/Unfixed DRC Error Markers #26

Fix GlobalDrcForceImproveSolver Debugging Blind Spots With Fixed/Unfixed DRC Error Markers

Fix GlobalDrcForceImproveSolver Debugging Blind Spots With Fixed/Unfixed DRC Error Markers #26

Workflow file for this run

# Benchmark workflow: runs ./benchmark.sh and reports the results.
name: Benchmark
# Triggers: PR issue comments ("/benchmark ..."), pull requests, pushes to
# main, and manual dispatch with optional overrides.
on:
issue_comment:
types: [created]
pull_request:
types: [opened, reopened, synchronize, edited]
push:
branches:
- main
# Manual runs accept optional inputs that are forwarded to benchmark.sh
# as CLI flags by the "Parse benchmark command" step.
workflow_dispatch:
inputs:
scenario_limit:
description: 'Scenario limit (number or "all"). Default: all.'
required: false
type: string
concurrency:
description: Number of workers, or "auto" (optional)
required: false
type: string
effort:
description: Solver effort value (optional)
required: false
type: string
max_iterations:
description: Solver max iterations override (optional)
required: false
type: string
ref:
description: Git ref (branch, tag, or SHA) to benchmark
required: false
type: string
# Write access to issues/PRs is needed to create and update the status
# comment; actions:read is needed to download the main-branch artifact.
permissions:
contents: read
issues: write
pull-requests: write
actions: read
jobs:
benchmark:
name: Run benchmark
# Gate: run on manual dispatch, pushes to main, PRs opting in via
# "[BENCHMARK TEST]" in the title, or "/benchmark" comments on a PR from
# non-bot users with owner/member/contributor/collaborator association.
if: |
github.event_name == 'workflow_dispatch' || (
github.event_name == 'push' &&
github.ref_name == 'main'
) || (
github.event_name == 'pull_request' &&
contains(github.event.pull_request.title, '[BENCHMARK TEST]')
) || (
github.event_name == 'issue_comment' &&
github.event.issue.pull_request &&
github.event.comment.user.type != 'Bot' &&
startsWith(github.event.comment.body, '/benchmark') &&
(
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'CONTRIBUTOR' ||
github.event.comment.author_association == 'COLLABORATOR'
)
)
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
# Resolve which ref to benchmark and which CLI args to pass to
# benchmark.sh from the triggering event; for /benchmark comments this
# also posts a placeholder status comment that the last step edits.
- name: Parse benchmark command
id: parse
uses: actions/github-script@v7
with:
github-token: ${{ secrets.TSCIRCUIT_BOT_GITHUB_TOKEN }}
script: |
const isComment = context.eventName === 'issue_comment'
// Tokenize a /benchmark command body using POSIX-like shell rules:
// whitespace separates tokens, single quotes are fully literal, double
// quotes allow backslash escapes of " \ $ ` (and backslash-newline as a
// line continuation), and a bare backslash escapes the next character.
// Throws on an unterminated quote.
const splitShellArgs = (input) => {
  const tokens = []
  let buffer = ''
  let activeQuote = null      // "'" | '"' | null
  let pendingEscape = false   // previous char was an unconsumed backslash
  let haveToken = false       // a token (possibly empty, e.g. '') is open
  const flush = () => {
    if (!haveToken) return
    tokens.push(buffer)
    buffer = ''
    haveToken = false
  }
  for (const ch of input) {
    if (pendingEscape) {
      pendingEscape = false
      // Backslash-newline inside double quotes is a line continuation.
      if (activeQuote === '"' && ch === '\n') continue
      // Inside double quotes, backslash only escapes " \ $ ` — for any
      // other character the backslash itself is kept literally.
      buffer += activeQuote === '"' && !['"', '\\', '$', '`'].includes(ch) ? `\\${ch}` : ch
      haveToken = true
      continue
    }
    if (activeQuote === "'") {
      if (ch === "'") activeQuote = null
      else buffer += ch
      haveToken = true
      continue
    }
    if (activeQuote === '"') {
      if (ch === '"') activeQuote = null
      else if (ch === '\\') pendingEscape = true
      else buffer += ch
      haveToken = true
      continue
    }
    // Unquoted context below.
    if (/\s/.test(ch)) {
      flush()
    } else if (ch === "'" || ch === '"') {
      activeQuote = ch
      haveToken = true
    } else if (ch === '\\') {
      pendingEscape = true
      haveToken = true
    } else {
      buffer += ch
      haveToken = true
    }
  }
  // A trailing lone backslash is kept literally.
  if (pendingEscape) {
    buffer += '\\'
  }
  if (activeQuote !== null) {
    throw new Error('Unterminated quote in /benchmark command')
  }
  flush()
  return tokens
}
// Defaults: benchmark the triggering commit with no extra CLI args.
let benchmarkArgs = []
let ref = context.sha
let statusCommentId = ''
if (isComment) {
// "/benchmark <args...>": tokenize everything after the command word.
const body = context.payload.comment.body.trim()
const commentArgs = body.replace(/^\/benchmark\b/, '').trim()
benchmarkArgs = splitShellArgs(commentArgs)
// issue_comment events carry no PR head SHA, so look it up explicitly.
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number,
})
ref = pr.data.head.sha
// Post a "running" placeholder comment now; the final step updates
// this same comment in place with the results.
const statusComment = await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `## Benchmark\n\nRunning benchmark on \`${ref.slice(0, 7)}\`...\n\nWorkflow: [View run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})`,
})
statusCommentId = String(statusComment.data.id)
}
if (context.eventName === 'workflow_dispatch') {
// Map each non-empty dispatch input onto its benchmark.sh flag.
const inputs = context.payload.inputs || {}
const scenarioLimit = (inputs.scenario_limit || '').trim()
const concurrency = (inputs.concurrency || '').trim()
const effort = (inputs.effort || '').trim()
const maxIterations = (inputs.max_iterations || '').trim()
if (scenarioLimit) {
benchmarkArgs.push('--scenario-limit', scenarioLimit)
}
if (concurrency) {
benchmarkArgs.push('--concurrency', concurrency)
}
if (effort) {
benchmarkArgs.push('--effort', effort)
}
if (maxIterations) {
benchmarkArgs.push('--max-iterations', maxIterations)
}
// Dispatch may target an explicit ref; otherwise keep the trigger SHA.
ref = (inputs.ref || '').trim() || ref
}
if (context.eventName === 'pull_request') {
ref = context.payload.pull_request.head.sha
}
// Expose results to later steps as step outputs.
core.setOutput('benchmark_args_json', JSON.stringify(benchmarkArgs))
core.setOutput('ref', ref)
core.setOutput('status_comment_id', statusCommentId)
# Check out the ref resolved by the parse step (PR head, dispatch input,
# or the triggering SHA).
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ steps.parse.outputs.ref }}
- name: Setup bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- name: Install dependencies
run: bun install --no-save
- name: Run benchmark
env:
BENCHMARK_ARGS_JSON: ${{ steps.parse.outputs.benchmark_args_json }}
run: |
set -o pipefail
chmod +x ./benchmark.sh
# Launch through a quoted node heredoc so args arrive via JSON rather
# than shell re-parsing; tee captures combined output for the artifact.
node <<'NODE' 2>&1 | tee benchmark-result.txt
// Run benchmark.sh with the parsed args, inheriting stdio so output
// streams through the tee pipeline above.
const { spawnSync } = require('node:child_process')
const args = JSON.parse(process.env.BENCHMARK_ARGS_JSON || '[]')
// Log the effective command line (args JSON-quoted for readability).
const renderedArgs = args.map((arg) => JSON.stringify(arg)).join(' ')
console.log(`Running benchmark command: ./benchmark.sh${renderedArgs ? ` ${renderedArgs}` : ''}`)
const result = spawnSync('./benchmark.sh', args, {
stdio: 'inherit',
env: process.env,
})
// Spawn failures (e.g. script missing or not executable) surface here.
if (result.error) {
throw result.error
}
// Propagate the benchmark's exit code; a null status (signal-killed
// process) is treated as failure.
process.exit(result.status ?? 1)
NODE
# Duplicate this run's outputs under PR-specific names so the uploaded
# artifact can hold both PR and main-branch results side by side.
- name: Save PR benchmark output
if: always()
run: |
if [ -f benchmark-result.txt ]; then
cp benchmark-result.txt benchmark-result-pr.txt
fi
if [ -f benchmark-result.json ]; then
cp benchmark-result.json benchmark-result-pr.json
fi
# Fetch the latest successful main-branch benchmark artifact for the
# comparison table; fall back to placeholder files when unavailable.
- name: Download main branch benchmark result
if: always() && github.event_name == 'issue_comment'
env:
GH_TOKEN: ${{ secrets.TSCIRCUIT_BOT_GITHUB_TOKEN }}
run: |
# Most recent successful push-triggered run of this workflow on main.
RUN_ID=$(gh api "repos/${{ github.repository }}/actions/workflows/benchmark.yml/runs?branch=main&event=push&status=success&per_page=1" --jq '.workflow_runs[0].id' 2>/dev/null || echo "")
if [ -z "$RUN_ID" ] || [ "$RUN_ID" = "null" ]; then
echo "(no main branch benchmark result available)" > benchmark-result-main.txt
echo "" > benchmark-result-main.json
exit 0
fi
# Best-effort download: the artifact may have expired or be missing.
gh run download "$RUN_ID" --repo "${{ github.repository }}" --name benchmark-result --dir ./main-artifact 2>/dev/null || true
if [ -f ./main-artifact/benchmark-result.txt ]; then
cp ./main-artifact/benchmark-result.txt benchmark-result-main.txt
else
echo "(no main branch benchmark result available)" > benchmark-result-main.txt
fi
if [ -f ./main-artifact/benchmark-result.json ]; then
cp ./main-artifact/benchmark-result.json benchmark-result-main.json
else
echo "" > benchmark-result-main.json
fi
# Publish PR and main results together; overwrite keeps re-runs current.
- name: Upload benchmark result
if: always()
uses: actions/upload-artifact@v4
with:
name: benchmark-result
path: |
./benchmark-result.txt
./benchmark-result.json
./benchmark-result-pr.txt
./benchmark-result-pr.json
./benchmark-result-main.txt
./benchmark-result-main.json
overwrite: true
if-no-files-found: ignore
# Replace the placeholder status comment with the final report — only for
# /benchmark comment runs that actually created one.
- name: Post benchmark result comment
if: always() && github.event_name == 'issue_comment' && steps.parse.outputs.status_comment_id != ''
uses: actions/github-script@v7
with:
github-token: ${{ secrets.TSCIRCUIT_BOT_GITHUB_TOKEN }}
script: |
// Build and post the final benchmark comment from the saved result files.
const fs = require('node:fs')
// Cap on the posted comment body length (headroom under GitHub's limit).
const maxLength = 60000
// Clip overlong text, marking the cut point.
const truncate = (s, max) => s.length > max ? `${s.slice(0, max)}\n\n...truncated...` : s
// Render a millisecond count: whole ms under one second, seconds with two
// decimals otherwise; non-numeric/non-finite input renders as 'n/a'.
const formatDuration = (value) => {
  const isNumeric = typeof value === 'number' && Number.isFinite(value)
  if (!isNumeric) return 'n/a'
  if (value < 1000) return `${Math.round(value)}ms`
  return `${(value / 1000).toFixed(2)}s`
}
// Render a delta as a signed integer ('+3' / '-2'); zero is '0'.
const formatSignedInteger = (value) => {
  if (value === 0) return '0'
  if (!Number.isFinite(value)) return 'n/a'
  const rounded = Math.round(value)
  return value > 0 ? `+${rounded}` : `${rounded}`
}
// Render a millisecond delta with an explicit sign; zero is '0ms'.
const formatSignedDuration = (value) => {
  if (value === 0) return '0ms'
  if (!Number.isFinite(value)) return 'n/a'
  const magnitude = Math.abs(value)
  const rendered = magnitude < 1000 ? `${Math.round(magnitude)}ms` : `${(magnitude / 1000).toFixed(2)}s`
  return `${value > 0 ? '+' : '-'}${rendered}`
}
// Parse a JSON result file; null when missing, empty, or unparseable.
const readJson = (path) => {
  if (!fs.existsSync(path)) return null
  const contents = fs.readFileSync(path, 'utf8').trim()
  if (!contents) return null
  try {
    return JSON.parse(contents)
  } catch {
    return null
  }
}
// Read a text result file (trimmed); null when the file is missing.
const readText = (path) => (fs.existsSync(path) ? fs.readFileSync(path, 'utf8').trim() : null)
// Numeric difference report[field] - baseline[field]; null when either
// side is missing or not a finite number.
const deltaValue = (report, baseline, field) => {
  if (!report || !baseline) return null
  const current = Number(report[field])
  const reference = Number(baseline[field])
  return Number.isFinite(current) && Number.isFinite(reference) ? current - reference : null
}
// Lines describing the benchmark run's configuration; empty when the
// report has no metadata. Ends with a blank spacer line otherwise.
const renderMetadata = (report) => {
  const metadata = report?.metadata
  if (!metadata) return []
  const lines = [
    `Concurrency: ${metadata.concurrency ?? 'n/a'}`,
    `Effort: ${metadata.effort ?? 'n/a'}`,
  ]
  if (metadata.maxIterations !== undefined) {
    lines.push(`Max iterations: ${metadata.maxIterations}`)
  }
  lines.push(`Scenario limit used: ${metadata.scenarioLimitUsed ?? 'n/a'}`, '')
  return lines
}
// Metadata lines plus a markdown table of summary metrics. With
// options.includeDelta, adds a per-metric delta column computed against
// options.baseline (the main-branch report).
const renderSummaryTable = (report, options = {}) => {
  if (!report) return ['Summary table unavailable.']
  const withDelta = Boolean(options.includeDelta)
  const baseline = options.baseline ?? null
  // [display label, report field, render type]
  const metrics = [
    ['Samples', 'sampleCount', 'integer'],
    ['Succeeded', 'succeeded', 'integer'],
    ['Failed', 'failed', 'integer'],
    ['Improved', 'improved', 'integer'],
    ['Clean', 'clean', 'integer'],
    ['Initial DRC', 'totalInitialDrcCount', 'integer'],
    ['Final DRC', 'totalFinalDrcCount', 'integer'],
    ['DRC improvement', 'totalImprovement', 'integer'],
    ['Total solve time', 'totalSolveTimeMs', 'duration'],
    ['Average solve time', 'averageSolveTimeMs', 'duration'],
  ]
  const cell = (value, type) =>
    type === 'duration' ? formatDuration(Number(value)) : String(value ?? 'n/a')
  const deltaCell = (field, type) => {
    const delta = deltaValue(report, baseline, field)
    if (delta === null) return 'n/a'
    return type === 'duration' ? formatSignedDuration(delta) : formatSignedInteger(delta)
  }
  const header = withDelta
    ? ['| Metric | PR Value | Delta vs Main |', '| --- | --- | --- |']
    : ['| Metric | Value |', '| --- | --- |']
  const body = metrics.map(([label, field, type]) =>
    withDelta
      ? `| ${label} | ${cell(report[field], type)} | ${deltaCell(field, type)} |`
      : `| ${label} | ${cell(report[field], type)} |`,
  )
  return [...renderMetadata(report), ...header, ...body]
}
// Prefer the PR-named copies, fall back to the raw outputs, then to
// placeholder text so the comment always renders something.
const prText = readText('benchmark-result-pr.txt') ?? readText('benchmark-result.txt') ?? '(benchmark output unavailable)'
const mainText = readText('benchmark-result-main.txt') ?? '(main benchmark output unavailable)'
const prReport = readJson('benchmark-result-pr.json') ?? readJson('benchmark-result.json')
const mainReport = readJson('benchmark-result-main.json')
const runUrl = `${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}`
// job.status is substituted when this step runs; anything other than
// 'success' means an earlier step failed or was cancelled.
const jobStatus = '${{ job.status }}'
const benchmarkFailed = jobStatus !== 'success'
// Assemble the comment: main-branch results collapsed, PR results open,
// raw outputs nested inside each, each clipped to 25000 chars.
const body = [
benchmarkFailed ? '## Benchmark Failed' : '## Benchmark Results',
'',
...(benchmarkFailed
? [
`Benchmark workflow ended with **${jobStatus}** before completion.`,
'',
]
: []),
'<details>',
'<summary>Main Branch Results</summary>',
'',
...renderSummaryTable(mainReport, { includeDelta: false }),
'',
'<details>',
'<summary>Raw output</summary>',
'',
'```',
truncate(mainText, 25000),
'```',
'</details>',
'</details>',
'',
'<details open>',
'<summary>PR Results</summary>',
'',
...renderSummaryTable(prReport, { includeDelta: true, baseline: mainReport }),
'',
'<details>',
'<summary>Raw output</summary>',
'',
'```',
truncate(prText, 25000),
'```',
'</details>',
'</details>',
'',
`Workflow: [View run](${runUrl})`,
`Artifact: ${runUrl}`,
].join('\n')
// Final safety clip so the whole body stays under the comment size cap.
const finalBody = body.length > maxLength ? truncate(body, maxLength) : body
// Edit the placeholder comment created by the parse step in place.
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: Number('${{ steps.parse.outputs.status_comment_id }}'),
body: finalBody,
})