# NOTE(review): removed GitHub web-UI paste artifacts that preceded this file
# ("Skip to content" and the PR title "deps(go): bump
# sigs.k8s.io/structured-merge-diff/v6 to v6.4.0 #901") — they are not part of
# the workflow and made the file unparseable as YAML.
name: CI - OpenShift E2E Tests

# Permissions needed for various jobs
permissions:
  contents: read
  pull-requests: write  # For posting comments on PRs
  statuses: write  # For reporting status on fork PR commits

# Cancel previous runs on the same PR to avoid resource conflicts
# Only group by PR number for legitimate triggers (pull_request, workflow_dispatch, /ok-to-test, or /retest comments)
# Regular comments get a unique group (run_id) so they don't cancel in-progress test runs
#
# Logic:
# - Regular comments (not /ok-to-test or /retest): unique group prevents cancellation of real tests
# - Valid triggers: group 'fma-e2e-openshift-{pr_number}' (can cancel previous runs for same PR)
# - Fallback chain for ID: pull_request.number -> issue.number -> run_id
#
# NOTE: Valid command list (/ok-to-test, /retest) must stay in sync with gate job validation
concurrency:
  # Folded (>-) so the multi-line expression is handed to the expression
  # evaluator as one logical line.
  group: >-
    ${{
      github.event_name == 'issue_comment' &&
      !contains(github.event.comment.body, '/ok-to-test') &&
      !contains(github.event.comment.body, '/retest')
      && format('comment-isolated-{0}', github.run_id)
      || format('fma-e2e-openshift-{0}',
           github.event.pull_request.number
           || github.event.issue.number
           || github.run_id)
    }}
  cancel-in-progress: true
# Workflow triggers: PRs against main, PR comments (for /ok-to-test and
# /retest on fork PRs), and manual dispatch.
on:
  pull_request:
    branches:
      - main
  # Allow maintainers to trigger tests on fork PRs via /ok-to-test comment
  issue_comment:
    types: [created]
  workflow_dispatch:
    inputs:
      skip_cleanup:
        description: 'Skip cleanup after tests'
        required: false
        default: 'false'
jobs:
  # Gate: Check permissions and handle /ok-to-test for fork PRs
  # - Maintainers (write access): Tests run automatically
  # - External contributors: Must wait for maintainer to comment /ok-to-test
  gate:
    runs-on: ubuntu-latest
    outputs:
      should_run: ${{ steps.check.outputs.should_run }}
      pr_number: ${{ steps.check.outputs.pr_number }}
      pr_head_sha: ${{ steps.check.outputs.pr_head_sha }}
      is_fork_pr: ${{ steps.check.outputs.is_fork_pr }}
    steps:
      - name: Check permissions and /ok-to-test
        id: check
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            // Helper to check if user has write access
            async function hasWriteAccess(username) {
              try {
                const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  username: username
                });
                const privilegedRoles = ['admin', 'maintain', 'write'];
                return privilegedRoles.includes(permission.permission);
              } catch (e) {
                console.log(`Could not get permissions for ${username}: ${e.message}`);
                return false;
              }
            }

            // Always run for workflow_dispatch
            if (context.eventName === 'workflow_dispatch') {
              core.setOutput('should_run', 'true');
              core.setOutput('pr_number', '');
              core.setOutput('pr_head_sha', context.sha);
              core.setOutput('is_fork_pr', 'false');
              return;
            }

            // Handle issue_comment event (/ok-to-test or /retest)
            if (context.eventName === 'issue_comment') {
              const comment = context.payload.comment.body.trim();
              const issue = context.payload.issue;

              // Only process /ok-to-test or /retest comments on PRs
              if (!issue.pull_request) {
                console.log('Comment is not on a PR, skipping');
                core.setOutput('should_run', 'false');
                return;
              }

              // NOTE: This list must stay in sync with concurrency group logic
              const validCommands = ['/ok-to-test', '/retest'];
              if (!validCommands.includes(comment)) {
                console.log(`Comment "${comment}" is not a valid trigger command, skipping`);
                core.setOutput('should_run', 'false');
                return;
              }

              // Check if commenter has write access
              const commenter = context.payload.comment.user.login;
              const hasAccess = await hasWriteAccess(commenter);
              if (!hasAccess) {
                console.log(`User ${commenter} does not have write access, ignoring ${comment}`);
                core.setOutput('should_run', 'false');
                return;
              }

              // Get PR details to get head SHA
              const { data: pr } = await github.rest.pulls.get({
                owner: context.repo.owner,
                repo: context.repo.repo,
                pull_number: issue.number
              });

              // Check if PR is from a fork
              const baseRepo = `${context.repo.owner}/${context.repo.repo}`;
              const headRepo = pr.head.repo ? pr.head.repo.full_name : baseRepo;
              const isFork = headRepo !== baseRepo;

              console.log(`${comment} approved by ${commenter} for PR #${issue.number}`);
              console.log(`PR head SHA: ${pr.head.sha}`);
              console.log(`Is fork PR: ${isFork} (head: ${headRepo}, base: ${baseRepo})`);
              core.setOutput('should_run', 'true');
              core.setOutput('pr_number', issue.number.toString());
              core.setOutput('pr_head_sha', pr.head.sha);
              core.setOutput('is_fork_pr', isFork ? 'true' : 'false');

              // Add reaction to acknowledge
              await github.rest.reactions.createForIssueComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: context.payload.comment.id,
                content: 'rocket'
              });

              // Post comment with link to the e2e workflow run
              const runUrl = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                body: `🚀 **E2E tests triggered by ${comment}**\n\n[View the OpenShift E2E workflow run](${runUrl})`
              });
              return;
            }

            // Handle pull_request event
            const pr = context.payload.pull_request;
            const prAuthor = pr.user.login;
            const prNumber = pr.number;
            const prHeadSha = pr.head.sha;

            // Check if PR is from a fork
            const baseRepo = `${context.repo.owner}/${context.repo.repo}`;
            const headRepo = pr.head.repo ? pr.head.repo.full_name : baseRepo;
            const isFork = headRepo !== baseRepo;
            console.log(`PR #${prNumber} is from fork: ${isFork} (head: ${headRepo}, base: ${baseRepo})`);
            core.setOutput('pr_number', prNumber.toString());
            core.setOutput('pr_head_sha', prHeadSha);
            core.setOutput('is_fork_pr', isFork ? 'true' : 'false');

            // Check if PR author has write access
            const isPrivileged = await hasWriteAccess(prAuthor);
            console.log(`PR #${prNumber} author ${prAuthor}: privileged=${isPrivileged}`);

            // Check if we already posted a bot comment
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber
            });
            const botComment = comments.data.find(c =>
              c.user.type === 'Bot' &&
              c.body.includes('ok-to-test')
            );

            // Helper to safely post a comment (may fail on fork PRs due to permissions)
            async function tryPostComment(body) {
              try {
                await github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: prNumber,
                  body: body
                });
                return true;
              } catch (e) {
                // Fork PRs can't post comments on pull_request event (GitHub security restriction)
                console.log(`Could not post comment (expected for fork PRs): ${e.message}`);
                return false;
              }
            }

            if (isPrivileged) {
              // For maintainer/admin fork PRs, we need to trigger via /ok-to-test
              // because fork PRs don't have access to secrets on pull_request event
              if (isFork) {
                console.log(`Maintainer fork PR detected - auto-triggering /ok-to-test for ${prAuthor}`);
                core.setOutput('should_run', 'false'); // Don't run on pull_request event
                // Auto-post /ok-to-test to trigger issue_comment workflow
                if (!botComment) {
                  const posted = await tryPostComment(`/ok-to-test`);
                  if (!posted) {
                    console.log('Note: Maintainer will need to manually comment /ok-to-test');
                  }
                }
                return;
              }
              // Non-fork PR from maintainer - run directly
              core.setOutput('should_run', 'true');
              return;
            }

            // External contributor - post instructions and skip
            console.log('External contributor PR - posting instructions');
            core.setOutput('should_run', 'false');
            if (!botComment) {
              const posted = await tryPostComment(`👋 Thanks for your contribution!\n\nThis PR is from a fork, so the e2e tests require approval to run (they use cluster resources).\n\n**For maintainers/admins:** Comment \`/ok-to-test\` to trigger the e2e tests after reviewing the code.\n\n**For contributors:** Please wait for a maintainer or admin to approve running the tests.`);
              if (!posted) {
                console.log('Note: Could not post instructions comment on fork PR');
              }
            }
      - name: Write workflow summary
        if: always()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
        with:
          script: |
            const shouldRun = '${{ steps.check.outputs.should_run }}';
            if (shouldRun === 'true') {
              core.summary.addRaw('**E2E tests will run** for this trigger.');
            } else {
              core.summary.addRaw('**E2E tests were skipped** (gate check did not pass for this trigger).');
            }
            await core.summary.write();
# Build the FMA controller image on GitHub-hosted runner
# Uses ko (Go-native image builder) and pushes to GHCR
# Note: Skip for fork PRs on pull_request event (no secrets access).
# For fork PRs, build-image runs via issue_comment trigger (/ok-to-test).
build-image:
needs: gate
if: |
needs.gate.outputs.should_run == 'true' &&
(needs.gate.outputs.is_fork_pr != 'true' || github.event_name != 'pull_request')
runs-on: ubuntu-latest
outputs:
image_tag: ${{ steps.build.outputs.image_tag }}
controller_image: ${{ steps.build.outputs.controller_image }}
requester_image: ${{ steps.build.outputs.requester_image }}
launcher_image: ${{ steps.build.outputs.launcher_image }}
steps:
- name: Checkout source
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ needs.gate.outputs.pr_head_sha }}
- name: Set up Go
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
with:
go-version: "1.25.7"
cache-dependency-path: ./go.sum
- name: Set up ko
uses: ko-build/setup-ko@d006021bd0c28d1ce33a07e7943d48b079944c8d # v0.9
- name: Log in to GHCR
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
with:
registry: ghcr.io
username: ${{ secrets.CR_USER }}
password: ${{ secrets.CR_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Build and push images
id: build
env:
GIT_REF: ${{ needs.gate.outputs.pr_head_sha }}
run: |
# Use first 8 chars of the git ref (POSIX-compliant)
IMAGE_TAG="ref-$(printf '%s' "$GIT_REF" | cut -c1-8)"
reg="${{ github.repository }}"
CONTAINER_IMG_REG="ghcr.io/${reg@L}"
echo "Building images with tag: $IMAGE_TAG"
echo "Registry: $CONTAINER_IMG_REG"
# Build controller (ko)
make build-controller \
CONTAINER_IMG_REG="$CONTAINER_IMG_REG" \
IMAGE_TAG="$IMAGE_TAG"
# Build launcher-populator (ko)
make build-populator \
CONTAINER_IMG_REG="$CONTAINER_IMG_REG" \
IMAGE_TAG="$IMAGE_TAG"
# Build requester (Docker, multi-platform)
make build-and-push-requester \
CONTAINER_IMG_REG="$CONTAINER_IMG_REG" \
REQUESTER_IMG_TAG="$IMAGE_TAG"
# Build launcher (Docker, GPU-capable)
make build-and-push-launcher \
CONTAINER_IMG_REG="$CONTAINER_IMG_REG" \
LAUNCHER_IMG_TAG="$IMAGE_TAG"
echo "image_tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "controller_image=${CONTAINER_IMG_REG}/dual-pods-controller:${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "requester_image=${CONTAINER_IMG_REG}/requester:${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "launcher_image=${CONTAINER_IMG_REG}/launcher:${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "All images built and pushed"
# Run e2e tests on OpenShift self-hosted runner
e2e-openshift:
runs-on: [self-hosted, openshift, vllm-d]
needs: [gate, build-image]
if: needs.gate.outputs.should_run == 'true'
env:
SKIP_CLEANUP: ${{ github.event.inputs.skip_cleanup || 'false' }}
# PR-specific namespace for isolation between concurrent PR tests
FMA_NAMESPACE: fma-e2e-pr-${{ needs.gate.outputs.pr_number || github.run_id }}
# Unique release name per run to avoid conflicts
FMA_CHART_INSTANCE_NAME: fma-e2e-${{ github.run_id }}
# Image registry and tag from the build job
IMAGE_TAG: ${{ needs.build-image.outputs.image_tag }}
# LAUNCHER_IMAGE and REQUESTER_IMAGE are needed by test object creation
# and cleanup step (rm-images-from-ocp-nodes.sh)
LAUNCHER_IMAGE: ${{ needs.build-image.outputs.launcher_image }}
REQUESTER_IMAGE: ${{ needs.build-image.outputs.requester_image }}
steps:
- name: Checkout source
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ needs.gate.outputs.pr_head_sha }}
- name: Install tools (kubectl, oc, helm)
run: |
# Install kubectl - pinned version for reproducible CI builds
KUBECTL_VERSION="v1.31.0"
echo "Installing kubectl version: $KUBECTL_VERSION"
curl -fsSL --retry 3 --retry-delay 5 -o kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
curl -fsSL --retry 3 --retry-delay 5 -o kubectl.sha256 "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
rm -f kubectl.sha256
# Install oc (OpenShift CLI)
curl -fsSL --retry 3 --retry-delay 5 -O "https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz"
tar -xzf openshift-client-linux.tar.gz
sudo mv oc /usr/local/bin/
rm -f openshift-client-linux.tar.gz kubectl README.md
# Install helm
curl -fsSL --retry 3 --retry-delay 5 https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Install yq if not already present (for YAML-to-JSON conversion in deploy_fma.sh)
if ! command -v yq &>/dev/null; then
YQ_VERSION="v4.53.2"
curl -fsSL --retry 3 --retry-delay 5 -o yq "https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64"
echo "d56bf5c6819e8e696340c312bd70f849dc1678a7cda9c2ad63eebd906371d56b yq" | sha256sum --check
chmod +x yq
sudo mv yq /usr/local/bin/
fi
- name: Verify cluster access
run: |
echo "Verifying cluster access..."
kubectl cluster-info
kubectl get nodes
- name: Dump select info about each node with a GPU
run: |
for nodename in $(kubectl get nodes -l nvidia.com/gpu.present=true -o jsonpath='{.items[*].metadata.name}'); do
echo "For ${nodename}:"
echo "taints: $(kubectl get node $nodename -o jsonpath='{.spec.taints}')"
echo "conditions: $(kubectl get node $nodename -o jsonpath='{.status.conditions}' | jq .)"
echo "FMA images: $(kubectl get node $nodename -o jsonpath='{.status.images}' | jq '[ .[] | select(.names | any(contains("fast-model-actuation"))) | {"names":.names, "sizeMB":(.sizeBytes/1048576|floor) } ]')"
echo
done
continue-on-error: true
- name: Detect cluster type
run: |
CLUSTER_DOMAIN=$(oc get ingress.config cluster -o jsonpath='{.spec.domain}')
echo "Cluster domain: $CLUSTER_DOMAIN"
if echo "$CLUSTER_DOMAIN" | grep -q "pokprod"; then
echo "Detected pokprod cluster"
echo "RUNTIME_CLASS_NAME=nvidia" >> $GITHUB_ENV
fi
- name: Clean up resources for this PR
run: |
echo "Cleaning up FMA resources for this PR..."
echo " FMA_NAMESPACE: $FMA_NAMESPACE"
if kubectl get namespace "$FMA_NAMESPACE" &>/dev/null; then
echo "=== Cleaning up namespace: $FMA_NAMESPACE ==="
# Uninstall all helm releases in the namespace
for release in $(helm list -n "$FMA_NAMESPACE" -q 2>/dev/null); do
echo " Uninstalling helm release: $release"
helm uninstall "$release" -n "$FMA_NAMESPACE" --ignore-not-found --wait --timeout 60s || true
done
# Remove dual-pods.llm-d.ai/* finalizers from all pods so namespace deletion is not blocked
echo " Removing dual-pods finalizers from pods in $FMA_NAMESPACE..."
for pod in $(kubectl get pods -n "$FMA_NAMESPACE" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null); do
all_finalizers=$(kubectl get pod "$pod" -n "$FMA_NAMESPACE" \
-o jsonpath='{range .metadata.finalizers[*]}{@}{"\n"}{end}' 2>/dev/null || true)
if ! echo "$all_finalizers" | grep -q '^dual-pods\.llm-d\.ai/'; then
continue
fi
echo " Patching pod $pod to remove dual-pods finalizers"
keep_entries=$(echo "$all_finalizers" \
| grep -v '^dual-pods\.llm-d\.ai/' \
| awk 'NR>1{printf ","} {printf "\"%s\"", $0}')
kubectl patch pod "$pod" -n "$FMA_NAMESPACE" --type=merge \
-p="{\"metadata\":{\"finalizers\":[${keep_entries}]}}" 2>/dev/null || true
done
echo " Deleting namespace: $FMA_NAMESPACE"
kubectl delete namespace "$FMA_NAMESPACE" --ignore-not-found --timeout=120s || true
else
echo "Namespace $FMA_NAMESPACE does not exist, skipping cleanup"
fi
# Clean up cluster-scoped resources from previous runs
echo "Cleaning up cluster-scoped resources..."
kubectl delete clusterrole "${FMA_CHART_INSTANCE_NAME}-node-view" --ignore-not-found || true
kubectl delete clusterrolebinding "${FMA_CHART_INSTANCE_NAME}-node-view" --ignore-not-found || true
echo "Cleanup complete"
- name: Create namespace
run: |
# Wait for namespace to be fully deleted if still terminating
if kubectl get namespace "$FMA_NAMESPACE" &>/dev/null; then
echo "Waiting for namespace $FMA_NAMESPACE to be deleted..."
while kubectl get namespace "$FMA_NAMESPACE" &>/dev/null; do
echo "Namespace still terminating..."
sleep 2
done
fi
echo "Creating namespace $FMA_NAMESPACE..."
kubectl create namespace "$FMA_NAMESPACE"
- name: Deploy FMA
env:
CONTAINER_IMG_REG: ghcr.io/${{ github.repository }}
NODE_VIEW_CLUSTER_ROLE: "create/please"
run: |
export CONTAINER_IMG_REG="${CONTAINER_IMG_REG,,}"
./test/e2e/deploy_fma.sh
- name: Dump InferenceServerConfig CRD
if: always()
run: kubectl get crd inferenceserverconfigs.fma.llm-d.ai -o yaml
- name: Dump LauncherConfig CRD
if: always()
run: kubectl get crd launcherconfigs.fma.llm-d.ai -o yaml
- name: Dump LauncherPopulationPolicy CRD
if: always()
run: kubectl get crd launcherpopulationpolicies.fma.llm-d.ai -o yaml
- name: Run E2E tests
env:
MKOBJS_SCRIPT: ./test/e2e/mkobjs-openshift.sh
run: ./test/e2e/test-cases.sh
- name: Dump GPU allocation per node
if: always()
run: |
echo "=== GPU allocation per node ==="
for nodename in $(kubectl get nodes -l nvidia.com/gpu.present=true -o jsonpath='{.items[*].metadata.name}'); do
allocatable=$(kubectl get node "$nodename" -o jsonpath='{.status.allocatable.nvidia\.com/gpu}')
allocated=$(kubectl get pods --all-namespaces --field-selector spec.nodeName="$nodename" -o json \
| jq '[.items[]
| select(.status.phase != "Succeeded" and .status.phase != "Failed")
| select(.metadata.deletionTimestamp == null)
| .spec.containers[]?.resources.limits["nvidia.com/gpu"] // "0"
| tonumber] | add // 0')
echo "Node $nodename: allocatable=$allocatable allocated=$allocated available=$((allocatable - allocated))"
echo " GPU-consuming pods in $FMA_NAMESPACE:"
kubectl get pods -n "$FMA_NAMESPACE" --field-selector spec.nodeName="$nodename" -o json \
| jq -r '.items[]
| select(.status.phase != "Succeeded" and .status.phase != "Failed")
| select(.metadata.deletionTimestamp == null)
| ([.spec.containers[]?.resources.limits["nvidia.com/gpu"] // "0" | tonumber] | add) as $gpu_total
| select($gpu_total > 0)
| " \(.metadata.name) gpu=\($gpu_total)"'
done
continue-on-error: true
- name: List objects of category all
if: always()
run: kubectl get all -n "$FMA_NAMESPACE"
- name: Dump InferenceServerConfig objects
if: always()
run: kubectl get inferenceserverconfigs -n "$FMA_NAMESPACE" -o yaml
- name: Dump LauncherConfig objects
if: always()
run: kubectl get launcherconfigs -n "$FMA_NAMESPACE" -o yaml
- name: Dump LauncherPopulationPolicy objects
if: always()
run: kubectl get launcherpopulationpolicies -n "$FMA_NAMESPACE" -o yaml
- name: Dump all Pods
if: always()
run: kubectl get pods -n "$FMA_NAMESPACE" -o yaml
- name: List event objects
if: always()
run: kubectl get events -n "$FMA_NAMESPACE" --sort-by='.lastTimestamp'
- name: Dump Pod logs
if: always()
run: |
for pod in $(kubectl get pods -n "$FMA_NAMESPACE" -o 'jsonpath={.items[*].metadata.name} ') ; do
containers=$(kubectl get pod -n "$FMA_NAMESPACE" "$pod" -o 'jsonpath={.spec.containers[*].name}')
for container in $containers ; do
echo ""
echo "=== Previous log of $pod (container: $container) ==="
kubectl logs -n "$FMA_NAMESPACE" "$pod" -c "$container" --previous || true
echo ""
echo "=== Log of $pod (container: $container) ==="
kubectl logs -n "$FMA_NAMESPACE" "$pod" -c "$container" || true
done
done
- name: Dump vLLM instance logs from launchers
if: always()
run: scripts/dump-launcher-vllm-logs.sh "$FMA_NAMESPACE"
- name: Clean up test objects
if: always()
run: |
echo "Cleaning up test objects..."
kubectl delete rs -n "$FMA_NAMESPACE" -l fma-e2e-instance --ignore-not-found || true
kubectl delete launcherpopulationpolicy -n "$FMA_NAMESPACE" -l fma-e2e-instance --ignore-not-found || true
kubectl delete inferenceserverconfig -n "$FMA_NAMESPACE" -l fma-e2e-instance --ignore-not-found || true
kubectl delete launcherconfig -n "$FMA_NAMESPACE" -l fma-e2e-instance --ignore-not-found || true
# Wait for test pods to terminate
sleep 10
echo "Test objects cleaned up"
- name: Cleanup infrastructure
# Cleanup unless told not to
if: always() && env.SKIP_CLEANUP != 'true'
run: |
echo "Cleaning up all FMA test infrastructure..."
echo " FMA_NAMESPACE: $FMA_NAMESPACE"
echo " FMA_CHART_INSTANCE_NAME: $FMA_CHART_INSTANCE_NAME"
# Uninstall Helm releases
for release in $(helm list -n "$FMA_NAMESPACE" -q 2>/dev/null); do
echo " Uninstalling helm release: $release"
helm uninstall "$release" -n "$FMA_NAMESPACE" --ignore-not-found --wait --timeout 60s || true
done
# Remove dual-pods.llm-d.ai/* finalizers from all pods so namespace deletion is not blocked
echo " Removing dual-pods finalizers from pods in $FMA_NAMESPACE..."
for pod in $(kubectl get pods -n "$FMA_NAMESPACE" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null); do
all_finalizers=$(kubectl get pod "$pod" -n "$FMA_NAMESPACE" \
-o jsonpath='{range .metadata.finalizers[*]}{@}{"\n"}{end}' 2>/dev/null || true)
if ! echo "$all_finalizers" | grep -q '^dual-pods\.llm-d\.ai/'; then
continue
fi
echo " Patching pod $pod to remove dual-pods finalizers"
keep_entries=$(echo "$all_finalizers" \
| grep -v '^dual-pods\.llm-d\.ai/' \
| awk 'NR>1{printf ","} {printf "\"%s\"", $0}')
kubectl patch pod "$pod" -n "$FMA_NAMESPACE" --type=merge \
-p="{\"metadata\":{\"finalizers\":[${keep_entries}]}}" 2>/dev/null || true
done
# Delete namespace
kubectl delete namespace "$FMA_NAMESPACE" \
--ignore-not-found --timeout=180s || true
# Delete cluster-scoped stuff for reading Node objects
kubectl delete clusterrole "${FMA_CHART_INSTANCE_NAME}-node-view" --ignore-not-found || true
kubectl delete clusterrolebinding "${FMA_CHART_INSTANCE_NAME}-node-view" --ignore-not-found || true
echo "Cleanup complete"
- name: Remove test images from cluster nodes
if: always() && env.SKIP_CLEANUP != 'true'
run: scripts/rm-images-from-ocp-nodes.sh "$LAUNCHER_IMAGE"
# Report status back to PR for issue_comment triggered runs
# This ensures fork PRs show the correct status after /ok-to-test runs complete
report-status:
runs-on: ubuntu-latest
needs: [gate, e2e-openshift]
# Run always (even on failure) but only for issue_comment events
if: always() && github.event_name == 'issue_comment' && needs.gate.outputs.should_run == 'true'
steps:
- name: Report status to PR
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
with:
script: |
const prHeadSha = '${{ needs.gate.outputs.pr_head_sha }}';
const e2eResult = '${{ needs.e2e-openshift.result }}';
// Map job result to commit status
let state, description;
if (e2eResult === 'success') {
state = 'success';
description = 'E2E tests passed';
} else if (e2eResult === 'skipped') {
state = 'pending';
description = 'E2E tests skipped';
} else if (e2eResult === 'cancelled') {
state = 'failure';
description = 'E2E tests cancelled';
} else {
state = 'failure';
description = 'E2E tests failed';
}
console.log(`Reporting status to PR commit ${prHeadSha}: ${state} - ${description}`);
await github.rest.repos.createCommitStatus({
owner: context.repo.owner,
repo: context.repo.repo,
sha: prHeadSha,
state: state,
target_url: `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`,
description: description,
context: '${{ github.workflow }} / e2e (comment trigger)'
});
console.log('Status reported successfully');