# faf6097079578301ebccd00c07ae0e028998f3c9 · mi355 #1
# This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
# Learn more about bidirectional Unicode characters
# Parity Report workflow — manually dispatched; compares ROCm vs CUDA unit-test
# results (or one commit vs a baseline commit) across the selected GPU archs.
name: Parity Report

# Dynamic run title. Precedence: commit-vs-baseline compare, then custom
# csv_name, then PR id, then SHA, then 'latest'; the arch list is appended.
run-name: "${{ inputs.baseline_sha && format('{0} vs {1}', inputs.sha || 'latest', inputs.baseline_sha) || inputs.csv_name || inputs.pr_id && format('PR {0}', inputs.pr_id) || inputs.sha || 'latest' }} · ${{ inputs.arch || 'mi355, mi300, mi200' }}"

on:
  workflow_dispatch:
    inputs:
      # --- download_testlogs flags ---
      sha:
        description: 'Commit SHA to pull test results for. Example: 67f1ccf46a966e75f37facd497a03f7d1bd72982. Leave empty for latest green on main.'
        required: false
        type: string
      baseline_sha:
        description: 'Baseline commit SHA to compare against (same workflow/arch). Produces a commit-vs-commit report instead of ROCm-vs-CUDA.'
        required: false
        type: string
      pr_id:
        description: 'Pull request number (alternative to SHA, uses latest commit). Example: 176306'
        required: false
        type: string
      arch:
        description: 'ROCm architectures, comma or space separated. Options: mi355, mi300, mi200, nightly, navi31. Example: "nightly, mi355" or "mi300"'
        required: false
        default: 'mi355, mi300, mi200'
        type: string
      exclude_distributed:
        description: 'Exclude distributed tests (auto-excluded for navi31)'
        required: false
        default: false
        type: boolean
      exclude_inductor:
        description: 'Exclude inductor tests (auto-excluded for navi31)'
        required: false
        default: false
        type: boolean
      exclude_default:
        description: 'Exclude default tests'
        required: false
        default: false
        type: boolean
      include_logs:
        description: 'Download and include CI log files (.txt) in artifact zip'
        required: false
        default: true
        type: boolean
      skip_rocm:
        description: 'Skip downloading ROCm test results (generate CUDA-only report)'
        required: false
        default: false
        type: boolean
      skip_cuda:
        description: 'Skip downloading CUDA test results (generate ROCm-only report)'
        required: false
        default: false
        type: boolean
      # --- summarize_xml_testreports flags ---
      set1_name:
        description: 'Label for ROCm columns in output CSV. Examples: rocm, nightly, mi300. Default: rocm'
        required: false
        default: 'rocm'
        type: string
      set2_name:
        description: 'Label for CUDA columns in output CSV. Examples: cuda, trunk. Default: cuda'
        required: false
        default: 'cuda'
        type: string
      csv_name:
        description: 'Custom prefix for output filenames and artifacts. Default: YYYYMMDD_all_tests_status'
        required: false
        type: string
      include_inductor_periodic:
        description: 'Download inductor-periodic benchmark artifacts (separate from parity CSV)'
        required: false
        default: false
        type: boolean
      include_xml:
        description: 'Include raw XML test reports in artifact zip (WARNING: drastically increases artifact size ~10x)'
        required: false
        default: false
        type: boolean
      auto_classify:
        description: 'Auto-classify skip reasons for SKIPPED/MISSED tests in the output CSV'
        required: false
        default: false
        type: boolean
jobs:
  # Turns the free-form `arch` input into a JSON array consumed by the
  # generate-parity matrix, and derives a stable artifact-name prefix.
  setup-matrix:
    runs-on: ubuntu-latest
    outputs:
      arch-matrix: ${{ steps.parse.outputs.matrix }}
      prefix: ${{ steps.parse.outputs.prefix }}
    steps:
      - name: Parse arch input into matrix
        id: parse
        # NOTE(review): `${{ inputs.* }}` values are interpolated directly
        # into this shell script; a value containing quotes or shell
        # metacharacters could break or inject into it. Consider passing
        # inputs through `env:` instead — confirm before hardening.
        run: |
          ARCHS="${{ inputs.arch }}"
          # Normalize commas and any whitespace into single spaces.
          ARCHS=$(echo "$ARCHS" | tr ',[:space:]' '\n' | sed '/^$/d' | tr '\n' ' ')
          # Build a JSON array: quote each token, comma-join, wrap in brackets.
          JSON=$(echo "$ARCHS" | tr ' ' '\n' | sed '/^$/d' | sed 's/^/"/;s/$/"/' | paste -sd',' | sed 's/^/[/;s/$/]/')
          echo "matrix=$JSON" >> "$GITHUB_OUTPUT"
          echo "Architectures: $JSON"
          # Artifact-prefix precedence: csv_name > sha > pr_id > "parity".
          if [ -n "${{ inputs.csv_name }}" ]; then
            PREFIX="${{ inputs.csv_name }}"
          elif [ -n "${{ inputs.sha }}" ]; then
            PREFIX="${{ inputs.sha }}"
          elif [ -n "${{ inputs.pr_id }}" ]; then
            PREFIX="${{ inputs.pr_id }}"
          else
            PREFIX="parity"
          fi
          PREFIX=$(echo "$PREFIX" | xargs)  # trim surrounding whitespace
          echo "prefix=$PREFIX" >> "$GITHUB_OUTPUT"
          echo "Artifact prefix: $PREFIX"
| generate-parity: | |
| needs: setup-matrix | |
| runs-on: ubuntu-latest | |
| strategy: | |
| fail-fast: false | |
| matrix: | |
| arch: ${{ fromJson(needs.setup-matrix.outputs.arch-matrix) }} | |
| steps: | |
| - name: Checkout | |
| uses: actions/checkout@v4 | |
| - name: Set up Python | |
| uses: actions/setup-python@v5 | |
| with: | |
| python-version: '3.10' | |
| - name: Install dependencies | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| run: pip install -r requirements.txt | |
| - name: Download artifacts | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| env: | |
| GITHUB_TOKEN: ${{ secrets.IFU_GITHUB_TOKEN }} | |
| AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} | |
| AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} | |
| run: | | |
| ARGS="--arch ${{ matrix.arch }}" | |
| if [ -n "${{ inputs.sha }}" ]; then | |
| ARGS="$ARGS --sha1 ${{ inputs.sha }}" | |
| fi | |
| if [ -n "${{ inputs.pr_id }}" ]; then | |
| ARGS="$ARGS --pr_id ${{ inputs.pr_id }}" | |
| fi | |
| if [ "${{ inputs.exclude_distributed }}" = "true" ]; then | |
| ARGS="$ARGS --exclude_distributed" | |
| fi | |
| if [ "${{ inputs.exclude_inductor }}" = "true" ]; then | |
| ARGS="$ARGS --exclude_inductor" | |
| fi | |
| if [ "${{ inputs.exclude_default }}" = "true" ]; then | |
| ARGS="$ARGS --exclude_default" | |
| fi | |
| ARGS="$ARGS --ignore_status" | |
| if [ "${{ inputs.include_logs }}" != "true" ]; then | |
| ARGS="$ARGS --artifacts_only" | |
| fi | |
| if [ "${{ inputs.skip_rocm }}" = "true" ]; then | |
| ARGS="$ARGS --no_rocm" | |
| fi | |
| if [ "${{ inputs.skip_cuda }}" = "true" ]; then | |
| ARGS="$ARGS --no_cuda" | |
| fi | |
| if [ "${{ inputs.include_inductor_periodic }}" = "true" ]; then | |
| ARGS="$ARGS --include_inductor_periodic" | |
| fi | |
| if [ -n "${{ inputs.baseline_sha }}" ]; then | |
| ARGS="$ARGS --baseline_sha ${{ inputs.baseline_sha }}" | |
| fi | |
| echo "Running: python3 ./download_testlogs $ARGS" | |
| python3 ./download_testlogs $ARGS 2>&1 | tee download_${{ matrix.arch }}.log | |
| - name: Identify output folder | |
| id: folder | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| run: | | |
| FOLDER=$(ls -dt [0-9]*_[0-9a-f]*/ 2>/dev/null | head -1 | sed 's:/$::') | |
| if [ -z "$FOLDER" ]; then | |
| echo "ERROR: No output folder found" | |
| exit 1 | |
| fi | |
| echo "folder=$FOLDER" >> "$GITHUB_OUTPUT" | |
| SHA=$(echo "$FOLDER" | grep -oP '[0-9a-f]{40}') | |
| echo "sha=$SHA" >> "$GITHUB_OUTPUT" | |
| DATE=$(TZ='America/Los_Angeles' date '+%Y%m%d') | |
| echo "date=$DATE" >> "$GITHUB_OUTPUT" | |
| mv download_${{ matrix.arch }}.log "$FOLDER/" 2>/dev/null || true | |
| echo "Output folder: $FOLDER, SHA: $SHA, Date: $DATE" | |
| - name: Generate CSV | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| run: | | |
| FOLDER="${{ steps.folder.outputs.folder }}" | |
| DATE="${{ steps.folder.outputs.date }}" | |
| ARCH="${{ matrix.arch }}" | |
| if [ -n "${{ inputs.csv_name }}" ]; then | |
| CSV_NAME="${{ inputs.csv_name }}_${ARCH}" | |
| else | |
| CSV_NAME="${DATE}_all_tests_status_${ARCH}" | |
| fi | |
| ARGS="--set1 $FOLDER/rocm_xml" | |
| if [ -n "${{ inputs.baseline_sha }}" ]; then | |
| ARGS="$ARGS --set2 $FOLDER/baseline_xml" | |
| CURRENT_SHORT=$(echo "${{ steps.folder.outputs.sha }}" | cut -c1-8) | |
| BASELINE_SHORT=$(echo "${{ inputs.baseline_sha }}" | cut -c1-8) | |
| ARGS="$ARGS --set1_name ${CURRENT_SHORT}" | |
| ARGS="$ARGS --set2_name ${BASELINE_SHORT}" | |
| else | |
| if [ "${{ inputs.skip_cuda }}" != "true" ]; then | |
| ARGS="$ARGS --set2 $FOLDER/cuda_xml" | |
| fi | |
| ARGS="$ARGS --set1_name ${{ inputs.set1_name }}" | |
| ARGS="$ARGS --set2_name ${{ inputs.set2_name }}" | |
| fi | |
| ARGS="$ARGS --output_csv $FOLDER/${CSV_NAME}.csv" | |
| SHORT_ARCH=$(echo "$ARCH" | sed 's/^mi//') | |
| if [ -n "${{ inputs.csv_name }}" ]; then | |
| RT_NAME="${{ inputs.csv_name }}_running_time_${SHORT_ARCH}" | |
| else | |
| RT_NAME="${DATE}_running_time_${SHORT_ARCH}" | |
| fi | |
| ARGS="$ARGS --test_file_running_time_output_csv $FOLDER/${RT_NAME}.csv" | |
| echo "Running: python3 -u summarize_xml_testreports.py $ARGS" | |
| python3 -u summarize_xml_testreports.py $ARGS 2>&1 | tee "$FOLDER/xml_processing_${DATE}.log" | |
| - name: Auto-classify skip reasons | |
| if: ${{ inputs.auto_classify }} | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| run: | | |
| FOLDER="${{ steps.folder.outputs.folder }}" | |
| CSV=$(find "$FOLDER" -maxdepth 1 -name "*.csv" ! -name "*_running_time*" | head -1) | |
| if [ -n "$CSV" ]; then | |
| echo "Auto-classifying skip reasons in $CSV" | |
| python3 auto_classify_skip_reasons.py -i "$CSV" -o "$CSV" --report 2>&1 | |
| else | |
| echo "No parity CSV found in $FOLDER, skipping auto-classify" | |
| fi | |
| - name: Collect upload paths | |
| id: upload-paths | |
| run: | | |
| FOLDER=".automation_scripts/pytorch-unit-test-scripts/${{ steps.folder.outputs.folder }}" | |
| PATHS="${FOLDER}/*.csv | |
| ${FOLDER}/*.log | |
| ${FOLDER}/*.txt | |
| ${FOLDER}/inductor_periodic_rocm_dir/ | |
| ${FOLDER}/inductor_periodic_cuda_dir/" | |
| if [ "${{ inputs.include_xml }}" = "true" ]; then | |
| PATHS="${PATHS} | |
| ${FOLDER}/rocm_xml/ | |
| ${FOLDER}/cuda_xml/ | |
| ${FOLDER}/baseline_xml/" | |
| fi | |
| echo "paths<<EOF" >> "$GITHUB_OUTPUT" | |
| echo "$PATHS" >> "$GITHUB_OUTPUT" | |
| echo "EOF" >> "$GITHUB_OUTPUT" | |
| - name: Upload artifacts | |
| uses: actions/upload-artifact@v4 | |
| with: | |
| name: ${{ needs.setup-matrix.outputs.prefix }}-results-${{ matrix.arch }} | |
| retention-days: 1 | |
| path: ${{ steps.upload-paths.outputs.paths }} | |
| summarize: | |
| needs: [setup-matrix, generate-parity] | |
| if: ${{ !cancelled() }} | |
| runs-on: ubuntu-latest | |
| steps: | |
| - name: Checkout | |
| uses: actions/checkout@v4 | |
| - name: Set up Python | |
| uses: actions/setup-python@v5 | |
| with: | |
| python-version: '3.10' | |
| - name: Download all per-arch CSV artifacts | |
| uses: actions/download-artifact@v4 | |
| with: | |
| pattern: ${{ needs.setup-matrix.outputs.prefix }}-results-* | |
| path: artifacts | |
| - name: Build parity report | |
| working-directory: .automation_scripts/pytorch-unit-test-scripts | |
| run: | | |
| ARCHS="${{ inputs.arch }}" | |
| SHA="${{ inputs.sha }}" | |
| PR_ID="${{ inputs.pr_id }}" | |
| BASELINE_SHA="${{ inputs.baseline_sha }}" | |
| if [ -n "$BASELINE_SHA" ]; then | |
| SET1=$(echo "$SHA" | cut -c1-8) | |
| SET2=$(echo "$BASELINE_SHA" | cut -c1-8) | |
| else | |
| SET1="${{ inputs.set1_name }}" | |
| SET2="${{ inputs.set2_name }}" | |
| fi | |
| ARCHS=$(echo "$ARCHS" | tr ',[:space:]' ' ') | |
| PREFIX=$(echo "${{ needs.setup-matrix.outputs.prefix }}" | xargs) | |
| CSV_ARGS=() | |
| ARCH_ARGS=() | |
| for ARCH in $ARCHS; do | |
| ARTIFACT_DIR="../artifacts/${PREFIX}-results-${ARCH}" | |
| CSV=$(find "$ARTIFACT_DIR"/ -maxdepth 2 -name "*.csv" ! -name "*_running_time*" ! -name "*_summary*" 2>/dev/null | head -1) | |
| if [ -z "$CSV" ]; then | |
| echo "WARNING: No CSV found for $ARCH, skipping" | |
| continue | |
| fi | |
| echo "Found CSV for $ARCH: $CSV" | |
| CSV_ARGS+=("$CSV") | |
| ARCH_ARGS+=("$ARCH") | |
| done | |
| if [ ${#CSV_ARGS[@]} -eq 0 ]; then | |
| echo "::warning::No CSVs found for any architecture — some or all generate-parity jobs may have failed" | |
| echo "## ⚠ No CSVs produced" >> "$GITHUB_STEP_SUMMARY" | |
| echo "No parity CSVs were found. Check the generate-parity job logs for errors." >> "$GITHUB_STEP_SUMMARY" | |
| exit 0 | |
| fi | |
| ARGS=(--csv "${CSV_ARGS[@]}" --arch "${ARCH_ARGS[@]}") | |
| ARGS+=(--set1_name "$SET1" --set2_name "$SET2") | |
| if [ -n "$SHA" ]; then | |
| ARGS+=(--sha "$SHA") | |
| else | |
| DETECTED_SHA=$(basename "$(find ../artifacts/ -name '*.csv' | head -1)" | grep -oP '[0-9a-f]{40}' || true) | |
| if [ -n "$DETECTED_SHA" ]; then | |
| ARGS+=(--sha "$DETECTED_SHA") | |
| fi | |
| fi | |
| if [ -n "$PR_ID" ]; then | |
| ARGS+=(--pr_id "$PR_ID") | |
| fi | |
| OUTPUT="${PREFIX}_summary" | |
| ARGS+=(--output "$OUTPUT") | |
| echo "Running: python3 generate_summary.py ${ARGS[*]}" | |
| python3 generate_summary.py "${ARGS[@]}" | |
| cat "${OUTPUT}.md" >> "$GITHUB_STEP_SUMMARY" | |
| - name: Add artifact links to summary | |
| env: | |
| GH_TOKEN: ${{ github.token }} | |
| run: | | |
| ARTIFACTS_JSON=$(gh api repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts --paginate -q '.artifacts[] | {name, id}') | |
| RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts" | |
| { | |
| echo "" | |
| echo "### ARTIFACTS" | |
| echo "" | |
| echo "| Artifact | Link |" | |
| echo "| --- | --- |" | |
| echo "$ARTIFACTS_JSON" | jq -r '"| \(.name) | [Download]('"${RUN_URL}"'/\(.id)) |"' | |
| echo "" | |
| } >> "$GITHUB_STEP_SUMMARY" |