# NOTE(review): the following lines are GitHub web-UI chrome captured by a
# copy/paste of the run page ("Skip to content", run title "cicd:experiment #392",
# "Workflow file for this run"). They are not part of the workflow file and have
# been commented out so the document parses as YAML.
name: ci
# Version 1.0.0
# This workflow is triggered on push events to the repository
# Please find readme.md for the usage of this workflow
on:
  push:
    branches:
      - 'cicd*'
jobs:
PREPARE-job:
runs-on: ubuntu-latest
outputs:
# Job Output Variables:
# - proceed_valid: Whether to proceed with build (true/false, e.g., "true")
# - dockerfile_path: Path to the modified Dockerfile (e.g., "x86/ex1.dockerfile")
# - version: Semver version from Dockerfile (e.g., "0.0.5")
# - devmode: Development mode flag (true/false, e.g., "true")
# - noscan: Skip security scan flag (true/false, e.g., "false")
# - files: List of changed files for debugging (e.g., "x86/ex1.dockerfile")
# - dockerhub_available: Whether Docker Hub credentials are available (true/false)
# - quayio_available: Whether Quay.io credentials are available (true/false)
proceed_valid: ${{ steps.set_proceed_flag.outputs.proceed_valid }}
dockerfile_path: ${{ steps.set_proceed_flag.outputs.dockerfile_path }}
version: ${{ steps.set_proceed_flag.outputs.version }}
devmode: ${{ steps.set_proceed_flag.outputs.devmode }}
noscan: ${{ steps.set_proceed_flag.outputs.noscan }}
files: ${{ steps.changed_files.outputs.files }}
date: ${{ steps.date.outputs.date }}
dockerhub_available: ${{ steps.check_vars_secrets.outputs.dockerhub_available }}
quayio_available: ${{ steps.check_vars_secrets.outputs.quayio_available }}
setonixreg_available: ${{ steps.check_vars_secrets.outputs.setonixreg_available }}
steps:
- name: Set default runner label
id: set_default_runner_label
run: |
echo "runner_label=ubuntu-latest" >> $GITHUB_OUTPUT
- name: Check required variables and secrets
id: check_vars_secrets
run: |
missing_vars=()
missing_secrets=()
# check Variables
if [ -z "${{ vars.DOCKERHUB_USERNAME }}" ]; then
missing_vars+=("DOCKERHUB_USERNAME")
fi
if [ -z "${{ vars.QUAYIO_USERNAME }}" ]; then
missing_vars+=("QUAYIO_USERNAME")
fi
if [ -z "${{ vars.SETONIXREG_USERNAME }}" ]; then
missing_vars+=("SETONIXREG_USERNAME")
fi
if [ -z "${{ vars.ACACIA_BUCKETNAME }}" ]; then
missing_vars+=("ACACIA_BUCKETNAME")
fi
# check Secrets
if [ -z "${{ secrets.PAT_TOKEN }}" ]; then
missing_secrets+=("PAT_TOKEN")
fi
if [ -z "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
missing_secrets+=("DOCKERHUB_TOKEN")
fi
if [ -z "${{ secrets.QUAYIO_TOKEN }}" ]; then
missing_secrets+=("QUAYIO_TOKEN")
fi
if [ -z "${{ secrets.SETONIXREG_PASS }}" ]; then
missing_secrets+=("SETONIXREG_PASS")
fi
if [ -z "${{ secrets.ACACIA_ACCESS_KEY_ID }}" ]; then
missing_secrets+=("ACACIA_ACCESS_KEY_ID")
fi
if [ -z "${{ secrets.ACACIA_SECRET_ACCESS_KEY }}" ]; then
missing_secrets+=("ACACIA_SECRET_ACCESS_KEY")
fi
# Log status of variables and secrets
if [ ${#missing_vars[@]} -ne 0 ]; then
echo "Missing Variables: ${missing_vars[@]}"
else
echo "All required variables are set."
fi
if [ ${#missing_secrets[@]} -ne 0 ]; then
echo "Missing Secrets: ${missing_secrets[@]}"
else
echo "All required secrets are set."
fi
# Set output flags for conditional job execution
dockerhub_available=$( [ -n "${{ vars.DOCKERHUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKERHUB_TOKEN }}" ] && echo 'true' || echo 'false' )
quayio_available=$( [ -n "${{ vars.QUAYIO_USERNAME }}" ] && [ -n "${{ secrets.QUAYIO_TOKEN }}" ] && echo 'true' || echo 'false' )
setonixreg_available=$( [ -n "${{ vars.SETONIXREG_USERNAME }}" ] && [ -n "${{ secrets.SETONIXREG_PASS }}" ] && echo 'true' || echo 'false' )
echo "dockerhub_available=$dockerhub_available" >> $GITHUB_OUTPUT
echo "quayio_available=$quayio_available" >> $GITHUB_OUTPUT
echo "setonixreg_available=$setonixreg_available" >> $GITHUB_OUTPUT
# Log registry availability status
echo ""
echo "=== Registry Credentials Check ==="
if [ "$dockerhub_available" = "true" ]; then
echo "[✓] Docker Hub: Credentials available"
else
echo "[✗] Docker Hub: Missing credentials (DOCKERHUB_USERNAME or DOCKERHUB_TOKEN)"
fi
if [ "$quayio_available" = "true" ]; then
echo "[✓] Quay.io: Credentials available"
else
echo "[✗] Quay.io: Missing credentials (QUAYIO_USERNAME or QUAYIO_TOKEN)"
fi
if [ "$setonixreg_available" = "true" ]; then
echo "[✓] Setonix Registry: Credentials available"
else
echo "[✗] Setonix Registry: Missing credentials (SETONIXREG_USERNAME or SETONIXREG_PASS)"
fi
echo "==================================="
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 2 # Ensure enough history is available
- name: Verify git history depth
run: |
# Check if we have enough history for git diff
if [ -z "${{ github.event.before }}" ]; then
echo "Error: github.event.before is empty. Cannot perform git diff operation."
echo "This usually means the repository history is not available or fetch-depth is insufficient."
exit 1
fi
# Verify that we can access the previous commit
if ! git rev-parse --verify ${{ github.event.before }} >/dev/null 2>&1; then
echo "Error: Cannot access previous commit ${{ github.event.before }}"
echo "This indicates that fetch-depth: 2 is not sufficient or the history is not available."
exit 1
fi
echo "Git history verification passed. Previous commit ${{ github.event.before }} is accessible."
- name: Get changed files
id: changed_files
run: |
# Get all Dockerfile variants (case-insensitive)
files=$(git diff --name-only ${{ github.event.before }} ${{ github.sha }} | grep -i '\.dockerfile$' || true)
if [ -z "$files" ]; then
echo "No Dockerfile changes detected. This workflow only processes Dockerfile modifications."
echo "files=" >> $GITHUB_OUTPUT
echo "proceed_valid=false" >> $GITHUB_OUTPUT
exit 0
fi
# Count the number of changed files
file_count=$(echo "$files" | wc -l)
if [ "$file_count" -gt 1 ]; then
echo "Multiple Dockerfiles changed ($file_count files). This workflow only processes single file changes."
echo "files<<EOF" >> $GITHUB_OUTPUT
echo "$files" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "proceed_valid=false" >> $GITHUB_OUTPUT
exit 0
fi
echo "Single Dockerfile changed: $files"
echo "files<<EOF" >> $GITHUB_OUTPUT
echo "$files" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "proceed_valid=true" >> $GITHUB_OUTPUT
- name: Debug output of changed files
run: |
echo "Files from output: ${{ steps.changed_files.outputs.files }}"
- name: Validate file changes
id: validate_changes
if: steps.changed_files.outputs.proceed_valid == 'true'
run: |
changed_files="${{ steps.changed_files.outputs.files }}"
echo "Processing single file: $changed_files"
echo "valid=true" >> $GITHUB_OUTPUT
echo "dockerfile_path=$changed_files" >> $GITHUB_OUTPUT
- name: Validate version label
id: validate_version
if: steps.validate_changes.outputs.valid == 'true'
run: |
file="${{ steps.validate_changes.outputs.dockerfile_path }}"
if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file"; then
version=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file" | sed -E 's/^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=\s*"?([^"]*)"?.*/\1/')
if echo "$version" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
echo "Version: $version ✓"
echo "valid=true" >> $GITHUB_OUTPUT
echo "version=$version" >> $GITHUB_OUTPUT
else
echo "Version: $version ✗ (invalid semver)"
echo "valid=false" >> $GITHUB_OUTPUT
echo "message=Invalid version: $version" >> $GITHUB_OUTPUT
fi
else
echo "Version: missing ✗"
echo "valid=false" >> $GITHUB_OUTPUT
echo "message=Missing version label" >> $GITHUB_OUTPUT
fi
- name: Parse optional settings
id: parse_settings
if: steps.validate_changes.outputs.valid == 'true'
run: |
file="${{ steps.validate_changes.outputs.dockerfile_path }}"
# Dev mode
if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.devmode\s*=\s*true' "$file"; then
echo "DevMode: enabled"
echo "devmode=true" >> $GITHUB_OUTPUT
else
echo "DevMode: disabled"
echo "devmode=false" >> $GITHUB_OUTPUT
fi
# Scan settings
if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscan\s*=\s*true' "$file"; then
if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file"; then
reason=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file")
echo "Scan: disabled ($reason)"
echo "noscan=true" >> $GITHUB_OUTPUT
echo "noscanreason=$reason" >> $GITHUB_OUTPUT
else
echo "Scan: disabled but no reason provided"
echo "noscan=false" >> $GITHUB_OUTPUT
fi
else
echo "Scan: enabled"
echo "noscan=false" >> $GITHUB_OUTPUT
fi
- name: Set proceed flag
id: set_proceed_flag
run: |
files_proceed="${{ steps.changed_files.outputs.proceed_valid }}"
changes_valid="${{ steps.validate_changes.outputs.valid }}"
version_valid="${{ steps.validate_version.outputs.valid }}"
# If files check failed, exit early
if [ "$files_proceed" != "true" ]; then
echo "(FAILED) File change validation failed"
echo "proceed_valid=false" >> $GITHUB_OUTPUT
echo "devmode=false" >> $GITHUB_OUTPUT
echo "noscan=false" >> $GITHUB_OUTPUT
exit 0
fi
# Check version validation
if [ "$version_valid" = "true" ]; then
echo "(PASSED) All validations passed"
echo "proceed_valid=true" >> $GITHUB_OUTPUT
else
echo "(FAILED) Version validation: ${{ steps.validate_version.outputs.message }}"
echo "proceed_valid=false" >> $GITHUB_OUTPUT
fi
# Pass through outputs
echo "dockerfile_path=${{ steps.validate_changes.outputs.dockerfile_path }}" >> $GITHUB_OUTPUT
echo "version=${{ steps.validate_version.outputs.version }}" >> $GITHUB_OUTPUT
echo "devmode=${{ steps.parse_settings.outputs.devmode || 'false' }}" >> $GITHUB_OUTPUT
echo "noscan=${{ steps.parse_settings.outputs.noscan || 'false' }}" >> $GITHUB_OUTPUT
- name: Set current date
if: steps.set_proceed_flag.outputs.proceed_valid == 'true'
id: date
run: |
date_tag=$(date +'%m-%d')
echo "Date tag: $date_tag"
echo "date=$date_tag" >> $GITHUB_OUTPUT
BUILD-job:
needs: PREPARE-job
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
outputs:
image_tag: ${{ steps.build_container.outputs.image_tag }}
dockerfile_name: ${{ steps.build_container.outputs.dockerfile_name }}
steps:
- name: Print hostname
run: |
echo "Hostname: $(hostname)"
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed
- name: Setup container environment
id: setup_env
run: |
echo "Setting up container environment variables..."
# Export environment variables
export XDG_DATA_HOME=/container/${USER}/data
export XDG_RUNTIME_DIR=/container/${USER}/runtime
export TMPDIR=/container/${USER}/tmp/
# Create required directories
mkdir -p ${XDG_DATA_HOME}
mkdir -p ${XDG_RUNTIME_DIR}
mkdir -p ${TMPDIR}
# Verify directories and output status
echo "Environment setup completed:"
echo "USER: ${USER}"
echo "XDG_DATA_HOME: ${XDG_DATA_HOME}"
echo "XDG_RUNTIME_DIR: ${XDG_RUNTIME_DIR}"
echo "TMPDIR: ${TMPDIR}"
# Check if directories exist
for dir in "${XDG_DATA_HOME}" "${XDG_RUNTIME_DIR}" "${TMPDIR}"; do
if [ -d "$dir" ]; then
echo "✓ Podman Directory exists: $dir"
else
echo "✗ Podman Directory missing: $dir"
exit 1
fi
done
# Set environment variables for subsequent steps
echo "XDG_DATA_HOME=${XDG_DATA_HOME}" >> $GITHUB_ENV
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV
echo "TMPDIR=${TMPDIR}" >> $GITHUB_ENV
- name: Setup podman cache
id: setup_cache
run: |
# Setup podman cache directory
CACHE_DIR="/container/${USER}/podman-cache"
mkdir -p "$CACHE_DIR"
echo "[CACHE SETUP]:"
echo " Cache directory: $CACHE_DIR"
# Configure podman to use cache
export TMPDIR="${TMPDIR:-/tmp}"
# Check cache size if exists
if [ -d "$CACHE_DIR" ]; then
cache_size=$(du -sh "$CACHE_DIR" 2>/dev/null | cut -f1 || echo "0")
echo " Current cache size: $cache_size"
else
echo " Cache directory created (empty)"
fi
echo "CACHE_DIR=$CACHE_DIR" >> $GITHUB_ENV
- name: Build container with podman
id: build_container
run: |
# Get variables from PREPARE-job
dockerfile_path="${{ needs.PREPARE-job.outputs.dockerfile_path }}"
version="${{ needs.PREPARE-job.outputs.version }}"
# Extract filename without extension for tag
dockerfile_name=$(basename "$dockerfile_path" .dockerfile)
echo "Building container with podman..."
echo "Dockerfile: $dockerfile_path"
echo "Tag: ${dockerfile_name}:${version}"
# Build with layer caching enabled
podman build --format=docker \
--layers \
-f "$dockerfile_path" \
-t "${dockerfile_name}:${version}" \
$(dirname "$dockerfile_path")
# Verify the image was built
if podman images | grep -q "${dockerfile_name}"; then
echo "✓ Image built successfully: ${dockerfile_name}:${version}"
# Show caching information
echo "[LAYER CACHE]: Podman layer caching enabled"
echo "[OPTIMIZATION]: Subsequent builds will reuse unchanged layers"
echo "[NOTE]: APT cache within container is cleaned as per Dockerfile"
# Show all images to see layer reuse
echo "[ALL IMAGES]:"
podman images | head -10
else
echo "✗ Failed to build image: ${dockerfile_name}:${version}"
exit 1
fi
# Set outputs for next step
# Output Variables:
# - image_tag: Complete image tag with version (e.g., "ex1:0.0.5")
# - dockerfile_name: Base name without extension (e.g., "ex1")
echo "image_tag=${dockerfile_name}:${version}" >> $GITHUB_OUTPUT
echo "dockerfile_name=${dockerfile_name}" >> $GITHUB_OUTPUT
- name: Save container to archive
id: save_container
run: |
# Get variables from previous step
image_tag="${{ steps.build_container.outputs.image_tag }}"
dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}"
version="${{ needs.PREPARE-job.outputs.version }}"
# Define output file
output_file="${dockerfile_name}_${version}.tar"
echo "Saving container to Docker archive..."
echo "Image: $image_tag"
echo "Output: $output_file"
# Save with podman using Docker archive format (compatible with Trivy)
podman save --format docker-archive "$image_tag" -o "$output_file"
# Verify the file was created
if [ -f "$output_file" ]; then
file_size=$(ls -lh "$output_file" | awk '{print $5}')
echo "✓ Archive saved successfully: $output_file (Size: $file_size)"
else
echo "✗ Failed to save archive: $output_file"
exit 1
fi
# Set outputs
# Output Variables:
# - archive_file: Archive filename only (e.g., "ex1_0.0.5.tar")
# - archive_path: Full path to archive (e.g., "/home/runner/work/repo/ex1_0.0.5.tar")
echo "archive_file=$output_file" >> $GITHUB_OUTPUT
echo "archive_path=${PWD}/$output_file" >> $GITHUB_OUTPUT
- name: Setup rclone for S3 upload
uses: ./.github/actions/setup-rclone
with:
access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
endpoint: https://projects.pawsey.org.au
bucket: ${{ vars.ACACIA_BUCKETNAME }}
destination_path: ${{ steps.save_container.outputs.archive_file }}
- name: Upload to S3 (no local storage)
id: s3_upload
run: |
# Get variables
archive_file="${{ steps.save_container.outputs.archive_file }}"
dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}"
version="${{ needs.PREPARE-job.outputs.version }}"
bucket="${{ vars.ACACIA_BUCKETNAME }}"
echo "Uploading directly to S3 (no local storage)..."
echo "Archive: $archive_file"
echo "Bucket: $bucket"
echo "S3 path: $archive_file"
# Calculate file size for optimization
FILE_SIZE=$(wc -c < "$archive_file")
echo "File size: $FILE_SIZE bytes"
# Set rclone parameters based on file size
if [ "$FILE_SIZE" -lt $((1024 * 1024 * 500)) ]; then
# < 500MB
S3_CHUNK_SIZE="16M"
S3_UPLOAD_CONCURRENCY=4
MULTI_THREAD_STREAMS=2
elif [ "$FILE_SIZE" -lt $((1024 * 1024 * 5000)) ]; then
# 500MB - 5GB
S3_CHUNK_SIZE="64M"
S3_UPLOAD_CONCURRENCY=8
MULTI_THREAD_STREAMS=4
else
# > 5GB
S3_CHUNK_SIZE="128M"
S3_UPLOAD_CONCURRENCY=16
MULTI_THREAD_STREAMS=8
fi
echo "Optimized settings:"
echo " S3 chunk size: $S3_CHUNK_SIZE"
echo " Upload concurrency: $S3_UPLOAD_CONCURRENCY"
echo " Multi-thread streams: $MULTI_THREAD_STREAMS"
# Upload to S3
./rclone copy "$archive_file" pawsey0012:"$bucket/" \
--multi-thread-streams=$MULTI_THREAD_STREAMS \
--s3-chunk-size=$S3_CHUNK_SIZE \
--s3-upload-concurrency=$S3_UPLOAD_CONCURRENCY \
--progress
# Verify upload
if ./rclone lsf pawsey0012:"$bucket/" | grep -q "^$archive_file$"; then
echo "✓ Archive successfully uploaded to S3"
echo " [BUCKET]: $bucket"
echo " [S3 PATH]: $archive_file"
echo " [SIZE]: $(ls -lh "$archive_file" | awk '{print $5}')"
else
echo "✗ Failed to verify S3 upload"
exit 1
fi
# Set outputs
echo "s3_bucket=$bucket" >> $GITHUB_OUTPUT
echo "s3_path=$archive_file" >> $GITHUB_OUTPUT
SCAN-AND-REPORT-job:
needs: [BUILD-job, PREPARE-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true' && needs.PREPARE-job.outputs.noscan != 'true'
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
REPORT_DIR: ./trivy-reports
steps:
- name: Print host
run: |
echo "Hostname: $(hostname)"
- name: Download image archive from S3
id: locate
uses: ./.github/actions/setup-rclone
with:
access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
endpoint: https://projects.pawsey.org.au
bucket: ${{ vars.ACACIA_BUCKETNAME }}
destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar
download_mode: true
dockerfile_name: ${{ env.DOCKERFILE_NAME }}
version: ${{ env.VERSION }}
load_to_podman: false
- name: Prepare report directory
run: |
mkdir -p "${REPORT_DIR}"
echo "Report directory created: ${REPORT_DIR}"
# Generate JSON report first
- name: Trivy (JSON scan for processing)
uses: aquasecurity/trivy-action@master
with:
scan-type: image
input: ./${{ steps.locate.outputs.archive_name }}
format: json
output: ${{ env.REPORT_DIR }}/scan-results.json
severity: CRITICAL,HIGH,MEDIUM,LOW,UNKNOWN
ignore-unfixed: true
vuln-type: os,library
exit-code: '0'
hide-progress: true
cache: true
env:
TRIVY_SKIP_DB_UPDATE: false
TRIVY_SKIP_JAVA_DB_UPDATE: false
# Process JSON to create readable summary
- name: Generate readable summary from JSON
run: |
python3 -c "
import json
import sys
import os
json_file = '${REPORT_DIR}/scan-results.json'
output_file = '${REPORT_DIR}/summary.md'
try:
with open(json_file, 'r') as f:
data = json.load(f)
# Initialize counters
counts = {'CRITICAL': 0, 'HIGH': 0, 'MEDIUM': 0, 'LOW': 0, 'UNKNOWN': 0}
vulnerabilities = []
# Process results
if 'Results' in data and data['Results']:
for result in data['Results']:
if 'Vulnerabilities' in result and result['Vulnerabilities']:
for vuln in result['Vulnerabilities']:
severity = vuln.get('Severity', 'UNKNOWN')
if severity in counts:
counts[severity] += 1
vulnerabilities.append(vuln)
# Generate summary
with open(output_file, 'w') as f:
artifact_name = data.get('ArtifactName', 'Unknown')
f.write(f'## Trivy Scan Summary — {artifact_name}\n\n')
f.write(f'**Target:** {artifact_name}\n')
f.write(f'**Total Vulnerabilities:** {sum(counts.values())}\n\n')
f.write('### Severity Counts\n')
for severity, count in counts.items():
if count > 0:
f.write(f'- **{severity}:** {count}\n')
else:
f.write(f'- {severity}: {count}\n')
f.write('\n')
# Show critical and high details
critical_high = [v for v in vulnerabilities if v.get('Severity') in ['CRITICAL', 'HIGH']]
if critical_high:
f.write('### Critical & High Severity Vulnerabilities\n\n')
f.write('| Severity | CVE ID | Package | Installed | Fixed |\n')
f.write('|----------|--------|---------|-----------|-------|\n')
for vuln in critical_high[:15]: # Limit to first 15
severity = vuln.get('Severity', 'N/A')
cve_id = vuln.get('VulnerabilityID', 'N/A')
pkg = vuln.get('PkgName', 'N/A')
installed = vuln.get('InstalledVersion', 'N/A')
fixed = vuln.get('FixedVersion', 'N/A') or '❌'
f.write(f'| {severity} | {cve_id} | {pkg} | {installed} | {fixed} |\n')
if len(critical_high) > 15:
f.write(f'\n*... and {len(critical_high) - 15} more critical/high vulnerabilities.*\n')
elif sum(counts.values()) > 0:
f.write('### Good News! 🎉\n\n')
f.write('No CRITICAL or HIGH severity vulnerabilities found.\n')
if counts['MEDIUM'] > 0:
f.write(f'Only {counts[\"MEDIUM\"]} MEDIUM severity issues detected.\n')
else:
f.write('### Excellent! ✅\n\n')
f.write('No vulnerabilities found in this image.\n')
print(f'Successfully generated summary: {output_file}')
except Exception as e:
with open(output_file, 'w') as f:
f.write(f'### ❌ Error Processing Scan Results\n\n')
f.write(f'**Error:** {str(e)}\n\n')
f.write(f'**JSON file:** {json_file}\n')
f.write(f'**File exists:** {os.path.exists(json_file)}\n')
print(f'Error: {e}', file=sys.stderr)
"
- name: Debug Trivy output
run: |
echo "=== Trivy output file exists? ==="
ls -la "${REPORT_DIR}/summary.md" || echo "Summary file not found"
echo ""
echo "=== Trivy output content ==="
cat "${REPORT_DIR}/summary.md" || echo "Cannot read summary file"
echo ""
echo "=== Report directory contents ==="
ls -la "${REPORT_DIR}/" || echo "Report directory not found"
- name: Append to GitHub Actions summary
run: |
{
echo "## Trivy Scan Report for \`${{ env.DOCKERFILE_NAME }}\`"
echo ""
echo "- **Scan target**: \`${{ steps.locate.outputs.archive_name }}\`"
echo "- **Archive source**: \`${{ steps.locate.outputs.archive_path }}\`"
echo "- **Version**: \`${{ env.VERSION }}\`"
echo ""
# Check if summary file exists and has content
if [ -f "${REPORT_DIR}/summary.md" ] && [ -s "${REPORT_DIR}/summary.md" ]; then
echo "### Scan Results"
cat "${REPORT_DIR}/summary.md"
else
echo "### ⚠️ Scan Results"
echo ""
echo "**Status:** Summary file not generated or empty"
echo ""
echo "**Possible causes:**"
echo "- Trivy template processing failed"
echo "- Archive file format issue"
echo "- Trivy action configuration problem"
echo ""
echo "**Debug info:**"
echo "- Template file: \`.github/trivy-summary.tpl\`"
echo "- Expected output: \`${REPORT_DIR}/summary.md\`"
echo ""
echo "Check the debug steps above for more details."
fi
} >> "$GITHUB_STEP_SUMMARY"
# Generate SARIF report for GitHub Security
- name: Trivy (SARIF for GitHub Security)
uses: aquasecurity/trivy-action@master
if: always()
with:
scan-type: image
input: ./${{ steps.locate.outputs.archive_name }}
format: sarif
output: ${{ env.REPORT_DIR }}/trivy-results.sarif
severity: CRITICAL,HIGH,MEDIUM
ignore-unfixed: true
vuln-type: os,library
exit-code: '0'
hide-progress: true
cache: true
env:
TRIVY_SKIP_DB_UPDATE: true
TRIVY_SKIP_JAVA_DB_UPDATE: true
- name: Upload SARIF to Security tab
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
sarif_file: ${{ env.REPORT_DIR }}/trivy-results.sarif
- name: Upload Trivy artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: trivy-reports-${{ env.DOCKERFILE_NAME }}-${{ env.VERSION }}
path: ${{ env.REPORT_DIR }}/
retention-days: 30
PUSH-PRIV-job:
needs: [BUILD-job, PREPARE-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
BUCKET: ${{ vars.ACACIA_BUCKETNAME }}
steps:
- name: Print hostname
run: |
echo "Hostname: $(hostname)"
- name: Download archive from S3
id: locate_archive
uses: ./.github/actions/setup-rclone
with:
access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
endpoint: https://projects.pawsey.org.au
bucket: ${{ env.BUCKET }}
destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar
download_mode: true
dockerfile_name: ${{ env.DOCKERFILE_NAME }}
version: ${{ env.VERSION }}
load_to_podman: false
- name: "[Setonix Registry] Login and Push"
if: needs.PREPARE-job.outputs.setonixreg_available == 'true'
continue-on-error: true
run: |
set +e # Don't exit on errors to allow independent execution
echo "=========================================="
echo " Setonix Registry Push Process"
echo "=========================================="
# Load image from archive first
echo "📦 Loading image from archive..."
archive_file="${{ steps.locate_archive.outputs.archive_name }}"
if podman load -i "$archive_file"; then
echo "✓ Image loaded successfully from archive"
else
echo "✗ Failed to load image from archive"
exit 1
fi
# Login to Setonix Registry
echo "🔐 Logging into Setonix Registry..."
if podman login https://setonix-registry.pawsey.org.au -u "${{ vars.SETONIXREG_USERNAME }}" -p "${{ secrets.SETONIXREG_PASS }}"; then
echo "✓ Setonix Registry login successful"
else
echo "✗ Setonix Registry login failed"
exit 1
fi
# Tag and push
image_tag="${DOCKERFILE_NAME}:${VERSION}"
setonix_tag="setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo ""
echo "🏷️ Tagging image for Setonix Registry:"
echo " Source: $image_tag"
echo " Target: $setonix_tag"
if podman tag "$image_tag" "$setonix_tag"; then
echo "✓ Image tagged successfully"
else
echo "✗ Image tagging failed"
exit 1
fi
echo ""
echo "📤 Pushing to Setonix Registry..."
if podman push "$setonix_tag"; then
echo "✅ Successfully pushed to Setonix Registry: $setonix_tag"
else
echo "❌ Failed to push to Setonix Registry"
exit 1
fi
# Cleanup local tagged image
echo ""
echo "🧹 Cleaning up local Setonix tagged image..."
podman rmi "$setonix_tag" 2>/dev/null || echo " (Setonix tag already removed or not found)"
podman rmi "$image_tag" 2>/dev/null || echo " (Base image already removed or not found)"
PUSH-PUBLIC-job:
needs: [BUILD-job, PREPARE-job, PUSH-PRIV-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
environment:
name: manual_approval
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
steps:
- name: Print hostname and registry push plan
run: |
echo "Hostname: $(hostname)"
echo ""
echo "=== Public Registry Push Plan ==="
echo "Registry credentials were checked in PREPARE-job:"
echo ""
# Show Docker Hub push plan
if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
echo "[✓] Docker Hub: Will push to ${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
else
echo "[skip] Docker Hub: Skipping (credentials not available)"
fi
# Show Quay.io push plan
if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
echo "[✓] Quay.io: Will push to quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
else
echo "[skip] Quay.io: Skipping (credentials not available)"
fi
# Show Setonix Registry status (already pushed in PUSH-PRIV-job)
if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then
echo "[✓] Setonix Registry: Already pushed in PUSH-PRIV-job"
else
echo "[skip] Setonix Registry: Skipped in PUSH-PRIV-job (credentials not available)"
fi
# Check if any public registry is available
if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" != "true" ] && [ "${{ needs.PREPARE-job.outputs.quayio_available }}" != "true" ]; then
echo ""
echo "[INFO] No public registry pushes will be performed in this job."
echo "Note: Setonix Registry push (if configured) is handled in PUSH-PRIV-job."
fi
echo ""
- name: Download and load image from S3
id: locate_and_load
if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true'
uses: ./.github/actions/setup-rclone
with:
access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
endpoint: https://projects.pawsey.org.au
bucket: ${{ vars.ACACIA_BUCKETNAME }}
destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar
download_mode: true
dockerfile_name: ${{ env.DOCKERFILE_NAME }}
version: ${{ env.VERSION }}
load_to_podman: true
- name: Skip message when no credentials available
if: needs.PREPARE-job.outputs.dockerhub_available != 'true' && needs.PREPARE-job.outputs.quayio_available != 'true'
run: |
echo ""
echo "=========================================="
echo " Public Registry Push: SKIPPED"
echo "=========================================="
echo ""
echo "No public registry credentials were found."
echo "The workflow will complete successfully without pushing to public registries."
echo ""
echo "This is normal behavior when:"
echo "• You only want to use private storage (S3)"
echo "• Credentials are not yet configured"
echo "• Testing the build pipeline"
echo ""
echo "To enable public registry pushes in the future, configure:"
echo "• Docker Hub: Set DOCKERHUB_USERNAME (repo variable) and DOCKERHUB_TOKEN (repo secret)"
echo "• Quay.io: Set QUAYIO_USERNAME (repo variable) and QUAYIO_TOKEN (repo secret)"
echo ""
echo "[✓] Job completed successfully (no action required)"
echo ""
- name: Prepare Docker config directory
if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true'
run: |
mkdir -p ~/.docker
echo "Created Docker config directory: ~/.docker"
# ============================================
# Docker Hub Push (Independent Operation)
# ============================================
- name: "[Docker Hub] Login and Push"
if: needs.PREPARE-job.outputs.dockerhub_available == 'true'
continue-on-error: true
run: |
set +e # Don't exit on errors to allow independent execution
echo "=========================================="
echo " Docker Hub Push Process"
echo "=========================================="
# Login to Docker Hub
echo "🔐 Logging into Docker Hub..."
if podman login docker.io -u "${{ vars.DOCKERHUB_USERNAME }}" -p "${{ secrets.DOCKERHUB_TOKEN }}"; then
echo "✓ Docker Hub login successful"
else
echo "✗ Docker Hub login failed"
exit 1
fi
# Tag and push
image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
dockerhub_tag="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo ""
echo "🏷️ Tagging image for Docker Hub:"
echo " Source: $image_tag"
echo " Target: $dockerhub_tag"
if podman tag "$image_tag" "$dockerhub_tag"; then
echo "✓ Image tagged successfully"
else
echo "✗ Image tagging failed"
exit 1
fi
echo ""
echo "📤 Pushing to Docker Hub..."
if podman push "$dockerhub_tag"; then
echo "✅ Successfully pushed to Docker Hub: $dockerhub_tag"
else
echo "❌ Failed to push to Docker Hub"
exit 1
fi
# ============================================
# Quay.io Push (Independent Operation)
# ============================================
- name: "[Quay.io] Login and Push"
if: needs.PREPARE-job.outputs.quayio_available == 'true'
continue-on-error: true
run: |
set +e # Don't exit on errors to allow independent execution
echo "=========================================="
echo " Quay.io Push Process"
echo "=========================================="
# Login to Quay.io
echo "🔐 Logging into Quay.io..."
if podman login quay.io -u "${{ vars.QUAYIO_USERNAME }}" -p "${{ secrets.QUAYIO_TOKEN }}"; then
echo "✓ Quay.io login successful"
else
echo "✗ Quay.io login failed"
exit 1
fi
# Tag and push
image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
quayio_tag="quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo ""
echo "🏷️ Tagging image for Quay.io:"
echo " Source: $image_tag"
echo " Target: $quayio_tag"
if podman tag "$image_tag" "$quayio_tag"; then
echo "✓ Image tagged successfully"
else
echo "✗ Image tagging failed"
exit 1
fi
echo ""
echo "📤 Pushing to Quay.io..."
if podman push "$quayio_tag"; then
echo "✅ Successfully pushed to Quay.io: $quayio_tag"
else
echo "❌ Failed to push to Quay.io"
exit 1
fi
- name: Cleanup local images
if: always() && (needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true')
run: |
echo "🧹 Cleaning up local images..."
image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
# Remove original image
echo "Removing base image: $image_tag"
podman rmi "$image_tag" 2>/dev/null || echo " (Base image already removed or not found)"
# Remove Docker Hub tagged image if it was created
if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
dockerhub_tag="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo "Removing Docker Hub tag: $dockerhub_tag"
podman rmi "$dockerhub_tag" 2>/dev/null || echo " (Docker Hub tag already removed or not found)"
fi
# Remove Quay.io tagged image if it was created
if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
quayio_tag="quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo "Removing Quay.io tag: $quayio_tag"
podman rmi "$quayio_tag" 2>/dev/null || echo " (Quay.io tag already removed or not found)"
fi
echo "✅ Local image cleanup completed"
# CLEANUP-job:
# needs: [APPROVE-PUSH-PUB-job,PUSH-PRIV-job, SCAN-AND-REPORT-job, BUILD-job, PREPARE-job]
# if: always()
# runs-on: ${{ needs.PREPARE-job.outputs.runner_label }}
# steps:
# - name: Clean-up
# run: |
# sudo rm -rf $HOME/runner/artifacts/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}
# DEPLOY-job:
# needs: [PUSH-PRIV-job,PREPARE-job]
# runs-on: Ella
# if: needs.PREPARE-job.outputs.platform_tag == 'arm'
# env:
# BUCKET: ${{ vars.ACACIA_BUCKETNAME }} # BYO or pawsey0001-image-compilation if compile for project
# DESTINATION_PATH: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}/${{ needs.PREPARE-job.outputs.date }}
# #environment:
# # name: manual_approval
# steps:
# - name: Checkout repository
# uses: actions/checkout@v4
# with:
# fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed
# - name: Setup rclone
# uses: ./.github/actions/setup-rclone
# with:
# access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
# secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
# endpoint: https://projects.pawsey.org.au
# bucket: ${{ env.BUCKET }}
# destination_path: ${{ env.DESTINATION_PATH }}
# - name: Deploy ARM image to Ella
# run: |
# echo "Deploying ARM image to Ella"
# echo "Hostname: $(hostname)"
# echo "Deploying image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }}" to Ella
# mkdir -p $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/
# rclone copy pawsey0001:"${{ env.BUCKET }}/${{ env.DESTINATION_PATH }}/image.tar" $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/
# - name: Convert to Singularity File
# run: |
# echo "Converting to Singularity File"
# echo "Converting image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }}" to Singularity
# source ~/.bashrc
# singularity build --force $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}.sif docker-archive://$MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/image.tar