Allow adapters to customize headers for fetch requests #246
name: Validate Changesets
on:
  pull_request_target:
    paths:
      - '.changeset/*.md'
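# Note: pull_request_target runs in the context of the base branch with access to
# repository secrets, which is why the validation step below reads changeset
# contents from the PR head SHA with `git show` instead of checking out PR code.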
concurrency:
  group: validate-changesets-${{ github.event.pull_request.number }}
  cancel-in-progress: true
jobs:
  validate-changesets:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      models: read
      pull-requests: write
    steps:
      - name: Checkout
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
        with:
          fetch-depth: 0
      - name: Fetch PR head
        run: |
          git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-${{ github.event.pull_request.number }}
      - name: Restore response cache
        uses: actions/cache/restore@6f8efc29b200d32929f49075959781ed54ec270c # v3.5.0
        id: cache-restore
        with:
          path: /tmp/llm_cache
          key: llm-validation-pr-${{ github.event.pull_request.number }}-${{ github.run_id }}
          restore-keys: |
            llm-validation-pr-${{ github.event.pull_request.number }}-
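      # The restore-keys prefix above picks up the most recent cache saved for this PR;
      # the "Save response cache" step at the end of the job writes a fresh entry keyed
      # by run_id so later runs can reuse cached LLM responses.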
      - name: Set up Node.js
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '20'
      - name: Install pnpm
        uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4.2.0
      - name: Install dependencies
        run: pnpm install
      - name: Set up Python
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.14.0'
      - name: Install llm CLI
        run: |
          python -m pip install --upgrade pip
          pip install "llm-github-models"
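      # Installing the llm-github-models plugin pulls in the llm CLI as a dependency
      # and registers the github/* model ids (e.g. github/gpt-4o used below); the
      # models: read permission and the exported GITHUB_TOKEN are presumably what
      # authorize those GitHub Models calls.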
      - name: Collect and validate changesets
        id: validate
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          echo "Starting changeset validation"
          # Find changed changeset files
          echo "::debug::Looking for changed files between ${{ github.event.pull_request.base.sha }} and ${{ github.event.pull_request.head.sha }}"
          git diff --name-only --diff-filter=AM \
            "${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }}" \
            -- '.changeset/*.md' | grep -v 'README.md' > changeset_files.txt || true
          {
            echo "::debug::Found changeset files:"
            cat changeset_files.txt || true
          } >&2
          if [ ! -s changeset_files.txt ]; then
            echo "::notice::No changeset files to validate"
            echo "has_changesets=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          echo "has_changesets=true" >> "$GITHUB_OUTPUT"
          echo "::notice::Found $(wc -l < changeset_files.txt) changeset file(s) to validate"
          # Collect changeset contents using XML-style tags for better AI parsing
          changesets=""
          total_size=0
          max_total_size=51200 # 50KB total limit
          while IFS= read -r file; do
            echo "::debug::Reading changeset file: $file"
            # Read file from PR head SHA without checking it out (safer for pull_request_target)
            if content=$(git show "${{ github.event.pull_request.head.sha }}:$file" 2>/dev/null); then
              content_size=${#content}
              new_total=$((total_size + content_size))
              if [ $new_total -gt $max_total_size ]; then
                echo "::error::Total changeset size exceeds 50KB limit. Please reduce the amount of content in your changesets."
                exit 1
              fi
              total_size=$new_total
              changesets=$(printf "%s<changeset file=\"%s\">\n%s\n</changeset>\n\n" "$changesets" "$file" "$content")
            else
              echo "::warning::Changeset file not found: $file"
            fi
          done < changeset_files.txt
          echo "::debug::Total changeset content size: $total_size bytes"
          {
            echo "::debug::Collected changesets content (first 500 chars):"
            echo "$changesets" | head -c 500
          } >&2
          # Read the validation prompt from file
          prompt_template=$(cat .github/workflows/changeset-validation-prompt.txt)
          # Create the full prompt with PR context and changesets
          prompt="${prompt_template}
          <pr_context>
          PR Title: ${{ github.event.pull_request.title }}
          </pr_context>
          $changesets"
          # Generate cache key from prompt content
          cache_dir="/tmp/llm_cache"
          mkdir -p "$cache_dir"
          cache_key=$(echo "$prompt" | sha256sum | cut -d' ' -f1)
          cache_file="${cache_dir}/${cache_key}.json"
          echo "::debug::Cache key (prompt hash): $cache_key"
          # Check for cached response
          if [ -f "$cache_file" ]; then
            echo "::notice::Cache hit! Using cached validation response"
            response=$(cat "$cache_file")
          else
            echo "::debug::Cache miss - calling LLM API"
            need_llm_call=true
          fi
          schema='{
            "type": "object",
            "properties": {
              "overall_valid": {
                "type": "boolean",
                "description": "Whether all changesets pass validation"
              },
              "files": {
                "type": "array",
                "items": {
                  "type": "object",
                  "properties": {
                    "file": {
                      "type": "string",
                      "description": "Path to the changeset file"
                    },
                    "valid": {
                      "type": "boolean",
                      "description": "Whether this specific file passes validation"
                    },
                    "issues": {
                      "type": "array",
                      "items": {
                        "type": "object",
                        "properties": {
                          "quote": {"type": "string", "description": "The problematic text from the changeset"},
                          "errors": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "Error messages for this quoted text"
                          },
                          "suggestions": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "Suggestions for this quoted text"
                          }
                        },
                        "required": ["quote", "errors", "suggestions"],
                        "additionalProperties": false
                      },
                      "description": "Issues grouped by quoted text"
                    },
                    "general_errors": {
                      "type": "array",
                      "items": {"type": "string"},
                      "description": "General errors not tied to specific text"
                    },
                    "general_suggestions": {
                      "type": "array",
                      "items": {"type": "string"},
                      "description": "General suggestions not tied to specific text"
                    }
                  },
                  "required": ["file", "valid", "issues", "general_errors", "general_suggestions"],
                  "additionalProperties": false
                },
                "description": "Per-file validation results"
              }
            },
            "required": ["overall_valid", "files"],
            "additionalProperties": false
          }'
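          # Illustrative sketch only (not used by the workflow): a failing response
          # conforming to this schema could look roughly like the following, where the
          # file name and messages are made up for the example:
          #   {"overall_valid": false,
          #    "files": [{"file": ".changeset/example.md", "valid": false,
          #               "issues": [{"quote": "Fixed stuff", "errors": ["Description is too vague"],
          #                           "suggestions": ["Say what was fixed and where"]}],
          #               "general_errors": [], "general_suggestions": []}]}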
          # Only call LLM if we don't have a cached response
          if [ "$need_llm_call" = "true" ]; then
            echo "::debug::Running llm CLI"
            response=$(echo "$prompt" | llm prompt -m github/gpt-4o --schema "$schema" --no-stream 2>&1) || {
              echo "::error::Failed to run llm CLI. Exit code: $?"
              echo "::debug::Error output: $response"
              response='{"overall_valid": false, "files": [{"file": "unknown", "valid": false, "issues": [], "general_errors": ["Failed to get AI response"], "general_suggestions": []}]}'
            }
            # Save successful response to cache
            if echo "$response" | jq -e '.overall_valid != null' > /dev/null 2>&1; then
              echo "$response" > "$cache_file"
              echo "::debug::Response cached for future use"
            fi
          fi
          {
            echo "::debug::LLM response received (first 500 chars):"
            echo "$response" | head -c 500
          } >&2
          # Save response for parsing
          echo "$response" > validation_response.json
          echo "::debug::Response saved to validation_response.json"
          # Check for shallow headings in each changeset
          echo "::debug::Checking for shallow headings (h1, h2, h3) in changesets"
          while IFS= read -r file; do
            if [ -f "$file" ]; then
              # Make path absolute for the script
              abs_file="$(pwd)/$file"
              # Run the script and capture output
              if heading_output=$(pnpm --filter astro-scripts has-shallow-headings "$abs_file" 2>&1); then
                echo "::debug::$file: No shallow headings found"
              else
                # Filter output to only lines that look like markdown headings (start with #)
                # This removes pnpm wrapper noise
                heading_output=$(pnpm --filter astro-scripts has-shallow-headings "$abs_file" 2>&1 | grep '^#' || true)
                if [ -n "$heading_output" ]; then
                  echo "::warning::$file: Contains forbidden shallow headings"
                  # Add heading errors to the validation response
                  # The script outputs the offending headings, one per line
                  while IFS= read -r heading; do
                    if [ -n "$heading" ]; then
                      error_msg="Forbidden heading found: '$heading'. Headings shallower than h4 (####) break changelog formatting. Please use h4 or deeper."
                      echo "::debug::Adding heading error for $file: $heading"
                      # Add error to the file's general_errors array in the JSON
                      jq --arg file "$file" --arg error "$error_msg" '
                        .overall_valid = false |
                        .files |= map(
                          if .file == $file then
                            .valid = false |
                            .general_errors += [$error]
                          else . end
                        )
                      ' validation_response.json > validation_response.tmp.json
                      mv validation_response.tmp.json validation_response.json
                    fi
                  done <<< "$heading_output"
                else
                  echo "::debug::$file: Script failed but no headings found (might be other error)"
                fi
              fi
            fi
          done < changeset_files.txt
          # Extract validation status - check both overall_valid AND actual errors
          has_errors=false
          # Check if any file has issues or errors
          if jq -e '.files[]? | select((.issues // [] | length > 0) or (.general_errors // [] | length > 0))' validation_response.json > /dev/null 2>&1; then
            has_errors=true
          fi
          # Validation passes only if overall_valid is true AND there are no errors
          if jq -e '.overall_valid == true' validation_response.json > /dev/null 2>&1 && [ "$has_errors" = "false" ]; then
            echo "::notice::Validation passed"
            echo "valid=true" >> "$GITHUB_OUTPUT"
          else
            echo "::warning::Validation failed or has errors"
            echo "valid=false" >> "$GITHUB_OUTPUT"
            # Show per-file errors if present
            {
              echo "::debug::Files with issues:"
              jq -r '.files[]? | select((.issues // [] | length > 0) or (.general_errors // [] | length > 0)) | "File: " + .file + " - Issues found"' validation_response.json 2>/dev/null || true
            } >&2
          fi
      - name: Comment validation results on PR
        id: comment
        if: steps.validate.outputs.has_changesets == 'true'
        uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
        with:
          script: |
            const fs = require('fs');
            let validation;
            try {
              const raw = fs.readFileSync('validation_response.json', 'utf8');
              validation = JSON.parse(raw);
            } catch (e) {
              validation = {
                overall_valid: false,
                files: [{
                  file: "unknown",
                  valid: false,
                  issues: [],
                  general_errors: ["Failed to parse validation response"],
                  general_suggestions: []
                }]
              };
            }
            // Find existing comment first
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const marker = "## 📝 Changeset Validation Results";
            const existingComment = comments.find(c =>
              c.user?.type === "Bot" && c.body?.includes(marker)
            );
            // Count total errors across all files
            let totalErrors = 0;
            if (validation.files) {
              totalErrors = validation.files.reduce((sum, file) => {
                let fileErrors = 0;
                if (file.issues) {
                  fileErrors += file.issues.reduce((issueSum, issue) => {
                    return issueSum + (issue.errors ? issue.errors.length : 0);
                  }, 0);
                }
                if (file.general_errors) {
                  fileErrors += file.general_errors.length;
                }
                return sum + fileErrors;
              }, 0);
            }
            // Extract validation status - check both overall_valid AND actual errors
            const overallValid = validation.overall_valid && totalErrors === 0;
            // If validation passes and there are no errors, don't comment
            if (overallValid) {
              // Delete existing comment if it exists
              if (existingComment) {
                await github.rest.issues.deleteComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: existingComment.id
                });
                console.log("Deleted existing comment - validation passed");
              }
              return; // Don't create a new comment
            }
            // Only comment if there are actual errors
            let comment = "## 📝 Changeset Validation Results\n\n";
            comment += "❌ **Changeset validation failed**\n\n";
            if (validation.files) {
              const failedFiles = validation.files.filter(file => {
                if (!file.valid) return true;
                // Check for errors in issues array
                if (file.issues && file.issues.some(issue => issue.errors && issue.errors.length > 0)) {
                  return true;
                }
                // Check for general errors
                if (file.general_errors && file.general_errors.length > 0) {
                  return true;
                }
                return false;
              });
              if (failedFiles.length > 0) {
                comment += "### Issues Found:\n\n";
                // Get the repository URL for proper file linking
                const repoUrl = "https://github.com/" + context.repo.owner + "/" + context.repo.repo;
                const branch = context.payload.pull_request.head.ref;
                failedFiles.forEach(file => {
                  // Create proper GitHub file link with branch
                  const fileUrl = repoUrl + "/blob/" + branch + "/" + file.file;
                  comment += "#### [`" + file.file + "`](" + fileUrl + ")\n\n";
                  // Display issues grouped by quote
                  if (file.issues && file.issues.length > 0) {
                    file.issues.forEach(issue => {
                      if (issue.quote) {
                        comment += "**Issue with:** '" + issue.quote + "'\n\n";
                        if (issue.errors && issue.errors.length > 0) {
                          if (issue.errors.length === 1) {
                            comment += "❌ " + issue.errors[0] + "\n\n";
                          } else {
                            comment += "❌ **Errors:**\n";
                            issue.errors.forEach(error => {
                              comment += " • " + error + "\n";
                            });
                            comment += "\n";
                          }
                        }
                        if (issue.suggestions && issue.suggestions.length > 0) {
                          if (issue.suggestions.length === 1) {
                            comment += "💡 " + issue.suggestions[0] + "\n\n";
                          } else {
                            comment += "💡 **Suggestions:**\n";
                            issue.suggestions.forEach(suggestion => {
                              comment += " • " + suggestion + "\n";
                            });
                            comment += "\n";
                          }
                        }
                      }
                    });
                  }
                  // Display general errors (not tied to specific quotes)
                  if (file.general_errors && file.general_errors.length > 0) {
                    comment += "**General Errors:**\n";
                    file.general_errors.forEach(error => {
                      comment += "- " + error + "\n";
                    });
                    comment += "\n";
                  }
                  // Display general suggestions (not tied to specific quotes)
                  if (file.general_suggestions && file.general_suggestions.length > 0) {
                    comment += "**General Suggestions:**\n";
                    file.general_suggestions.forEach(suggestion => {
                      comment += "- " + suggestion + "\n";
                    });
                    comment += "\n";
                  }
                });
              }
            }
            comment += "---\n";
            comment += "*📖 See [Astro's changeset guide](https://contribute.docs.astro.build/docs-for-code-changes/changesets/) for details.*";
            // Update or create comment only for failures
            if (existingComment) {
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: existingComment.id,
                body: comment
              });
            } else {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body: comment
              });
            }
            // Set output to indicate validation status
            if (!overallValid) {
              core.setOutput("validation_failed", "true");
              core.debug("Validation failed - comment posted but job will continue");
            }
      - name: Save response cache
        uses: actions/cache/save@6f8efc29b200d32929f49075959781ed54ec270c # v3.5.0
        if: always()
        with:
          path: /tmp/llm_cache
          key: llm-validation-pr-${{ github.event.pull_request.number }}-${{ github.run_id }}