# Workflow source captured from the GitHub Actions run page
# ("cicd:experiment", run #388 — "Workflow file for this run").
name: ci
# Version 1.0.0
# This workflow is triggered on push events to the repository
# Please find readme.md for the usage of this workflow
on:
  push:
    branches:
      - 'cicd*'
jobs:
  # Validates the push: requires exactly one changed *.dockerfile carrying a
  # semver org.opencontainers.image.version label, parses optional dev/scan
  # labels, and reports which registry credentials are configured.
  PREPARE-job:
    runs-on: [self-hosted, Linux, X64, podman, test, setonix-podman02]
    outputs:
      # Job Output Variables:
      # - proceed_valid: Whether to proceed with build (true/false, e.g., "true")
      # - dockerfile_path: Path to the modified Dockerfile (e.g., "x86/ex1.dockerfile")
      # - version: Semver version from Dockerfile (e.g., "0.0.5")
      # - devmode: Development mode flag (true/false, e.g., "true")
      # - noscan: Skip security scan flag (true/false, e.g., "false")
      # - files: List of changed files for debugging (e.g., "x86/ex1.dockerfile")
      # - dockerhub_available: Whether Docker Hub credentials are available (true/false)
      # - quayio_available: Whether Quay.io credentials are available (true/false)
      proceed_valid: ${{ steps.set_proceed_flag.outputs.proceed_valid }}
      dockerfile_path: ${{ steps.set_proceed_flag.outputs.dockerfile_path }}
      version: ${{ steps.set_proceed_flag.outputs.version }}
      devmode: ${{ steps.set_proceed_flag.outputs.devmode }}
      noscan: ${{ steps.set_proceed_flag.outputs.noscan }}
      files: ${{ steps.changed_files.outputs.files }}
      date: ${{ steps.date.outputs.date }}
      dockerhub_available: ${{ steps.check_vars_secrets.outputs.dockerhub_available }}
      quayio_available: ${{ steps.check_vars_secrets.outputs.quayio_available }}
      setonixreg_available: ${{ steps.check_vars_secrets.outputs.setonixreg_available }}
    steps:
      - name: Set default runner label
        id: set_default_runner_label
        # NOTE(review): runner_label is not exposed in this job's outputs and is
        # not read by any later step shown here — confirm it is still needed.
        run: |
          echo "runner_label=ubuntu-latest" >> $GITHUB_OUTPUT
      - name: Check required variables and secrets
        id: check_vars_secrets
        run: |
          missing_vars=()
          missing_secrets=()
          # check Variables
          if [ -z "${{ vars.DOCKERHUB_USERNAME }}" ]; then
            missing_vars+=("DOCKERHUB_USERNAME")
          fi
          if [ -z "${{ vars.QUAYIO_USERNAME }}" ]; then
            missing_vars+=("QUAYIO_USERNAME")
          fi
          if [ -z "${{ vars.SETONIXREG_USERNAME }}" ]; then
            missing_vars+=("SETONIXREG_USERNAME")
          fi
          if [ -z "${{ vars.ACACIA_BUCKETNAME }}" ]; then
            missing_vars+=("ACACIA_BUCKETNAME")
          fi
          # check Secrets
          if [ -z "${{ secrets.PAT_TOKEN }}" ]; then
            missing_secrets+=("PAT_TOKEN")
          fi
          if [ -z "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
            missing_secrets+=("DOCKERHUB_TOKEN")
          fi
          if [ -z "${{ secrets.QUAYIO_TOKEN }}" ]; then
            missing_secrets+=("QUAYIO_TOKEN")
          fi
          if [ -z "${{ secrets.SETONIXREG_PASS }}" ]; then
            missing_secrets+=("SETONIXREG_PASS")
          fi
          if [ -z "${{ secrets.ACACIA_ACCESS_KEY_ID }}" ]; then
            missing_secrets+=("ACACIA_ACCESS_KEY_ID")
          fi
          if [ -z "${{ secrets.ACACIA_SECRET_ACCESS_KEY }}" ]; then
            missing_secrets+=("ACACIA_SECRET_ACCESS_KEY")
          fi
          # Log status of variables and secrets
          if [ ${#missing_vars[@]} -ne 0 ]; then
            echo "Missing Variables: ${missing_vars[@]}"
          else
            echo "All required variables are set."
          fi
          if [ ${#missing_secrets[@]} -ne 0 ]; then
            echo "Missing Secrets: ${missing_secrets[@]}"
          else
            echo "All required secrets are set."
          fi
          # Set output flags for conditional job execution
          echo "dockerhub_available=$( [ -n "${{ vars.DOCKERHUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKERHUB_TOKEN }}" ] && echo 'true' || echo 'false' )" >> $GITHUB_OUTPUT
          echo "quayio_available=$( [ -n "${{ vars.QUAYIO_USERNAME }}" ] && [ -n "${{ secrets.QUAYIO_TOKEN }}" ] && echo 'true' || echo 'false' )" >> $GITHUB_OUTPUT
          echo "setonixreg_available=$( [ -n "${{ vars.SETONIXREG_USERNAME }}" ] && [ -n "${{ secrets.SETONIXREG_PASS }}" ] && echo 'true' || echo 'false' )" >> $GITHUB_OUTPUT
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 2 # Ensure enough history is available
      - name: Verify git history depth
        run: |
          # Check if we have enough history for git diff
          if [ -z "${{ github.event.before }}" ]; then
            echo "Error: github.event.before is empty. Cannot perform git diff operation."
            echo "This usually means the repository history is not available or fetch-depth is insufficient."
            exit 1
          fi
          # Verify that we can access the previous commit
          if ! git rev-parse --verify ${{ github.event.before }} >/dev/null 2>&1; then
            echo "Error: Cannot access previous commit ${{ github.event.before }}"
            echo "This indicates that fetch-depth: 2 is not sufficient or the history is not available."
            exit 1
          fi
          echo "Git history verification passed. Previous commit ${{ github.event.before }} is accessible."
      - name: Get changed files
        id: changed_files
        run: |
          # Get all Dockerfile variants (case-insensitive)
          files=$(git diff --name-only ${{ github.event.before }} ${{ github.sha }} | grep -i '\.dockerfile$' || true)
          if [ -z "$files" ]; then
            echo "No Dockerfile changes detected. This workflow only processes Dockerfile modifications."
            echo "files=" >> $GITHUB_OUTPUT
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          # Count the number of changed files
          file_count=$(echo "$files" | wc -l)
          if [ "$file_count" -gt 1 ]; then
            echo "Multiple Dockerfiles changed ($file_count files). This workflow only processes single file changes."
            echo "files<<EOF" >> $GITHUB_OUTPUT
            echo "$files" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          echo "Single Dockerfile changed: $files"
          echo "files<<EOF" >> $GITHUB_OUTPUT
          echo "$files" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
          echo "proceed_valid=true" >> $GITHUB_OUTPUT
      - name: Debug output of changed files
        run: |
          echo "Files from output: ${{ steps.changed_files.outputs.files }}"
      - name: Validate file changes
        id: validate_changes
        if: steps.changed_files.outputs.proceed_valid == 'true'
        run: |
          changed_files="${{ steps.changed_files.outputs.files }}"
          echo "Processing single file: $changed_files"
          echo "valid=true" >> $GITHUB_OUTPUT
          echo "dockerfile_path=$changed_files" >> $GITHUB_OUTPUT
      - name: Validate version label
        id: validate_version
        if: steps.validate_changes.outputs.valid == 'true'
        run: |
          file="${{ steps.validate_changes.outputs.dockerfile_path }}"
          # Require an OCI version label with a strict MAJOR.MINOR.PATCH value
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file"; then
            version=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file" | sed -E 's/^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=\s*"?([^"]*)"?.*/\1/')
            if echo "$version" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
              echo "Version: $version ✓"
              echo "valid=true" >> $GITHUB_OUTPUT
              echo "version=$version" >> $GITHUB_OUTPUT
            else
              echo "Version: $version ✗ (invalid semver)"
              echo "valid=false" >> $GITHUB_OUTPUT
              echo "message=Invalid version: $version" >> $GITHUB_OUTPUT
            fi
          else
            echo "Version: missing ✗"
            echo "valid=false" >> $GITHUB_OUTPUT
            echo "message=Missing version label" >> $GITHUB_OUTPUT
          fi
      - name: Parse optional settings
        id: parse_settings
        if: steps.validate_changes.outputs.valid == 'true'
        run: |
          file="${{ steps.validate_changes.outputs.dockerfile_path }}"
          # Dev mode
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.devmode\s*=\s*true' "$file"; then
            echo "DevMode: enabled"
            echo "devmode=true" >> $GITHUB_OUTPUT
          else
            echo "DevMode: disabled"
            echo "devmode=false" >> $GITHUB_OUTPUT
          fi
          # Scan settings: noscan=true is only honoured when a reason label exists
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscan\s*=\s*true' "$file"; then
            if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file"; then
              # NOTE(review): this captures the entire LABEL line, not just the
              # reason value — confirm that is the intended output format.
              reason=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file")
              echo "Scan: disabled ($reason)"
              echo "noscan=true" >> $GITHUB_OUTPUT
              echo "noscanreason=$reason" >> $GITHUB_OUTPUT
            else
              echo "Scan: disabled but no reason provided"
              echo "noscan=false" >> $GITHUB_OUTPUT
            fi
          else
            echo "Scan: enabled"
            echo "noscan=false" >> $GITHUB_OUTPUT
          fi
      - name: Set proceed flag
        id: set_proceed_flag
        run: |
          files_proceed="${{ steps.changed_files.outputs.proceed_valid }}"
          changes_valid="${{ steps.validate_changes.outputs.valid }}"
          version_valid="${{ steps.validate_version.outputs.valid }}"
          # If files check failed, exit early
          if [ "$files_proceed" != "true" ]; then
            echo "(FAILED) File change validation failed"
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            echo "devmode=false" >> $GITHUB_OUTPUT
            echo "noscan=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          # Check version validation
          if [ "$version_valid" = "true" ]; then
            echo "(PASSED) All validations passed"
            echo "proceed_valid=true" >> $GITHUB_OUTPUT
          else
            echo "(FAILED) Version validation: ${{ steps.validate_version.outputs.message }}"
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
          fi
          # Pass through outputs
          echo "dockerfile_path=${{ steps.validate_changes.outputs.dockerfile_path }}" >> $GITHUB_OUTPUT
          echo "version=${{ steps.validate_version.outputs.version }}" >> $GITHUB_OUTPUT
          echo "devmode=${{ steps.parse_settings.outputs.devmode || 'false' }}" >> $GITHUB_OUTPUT
          echo "noscan=${{ steps.parse_settings.outputs.noscan || 'false' }}" >> $GITHUB_OUTPUT
      - name: Set current date
        if: steps.set_proceed_flag.outputs.proceed_valid == 'true'
        id: date
        run: |
          date_tag=$(date +'%m-%d')
          echo "Date tag: $date_tag"
          echo "date=$date_tag" >> $GITHUB_OUTPUT
BUILD-job:
needs: PREPARE-job
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
outputs:
image_tag: ${{ steps.build_container.outputs.image_tag }}
dockerfile_name: ${{ steps.build_container.outputs.dockerfile_name }}
steps:
- name: Print hostname
run: |
echo "Hostname: $(hostname)"
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed
- name: Setup container environment
id: setup_env
run: |
echo "Setting up container environment variables..."
# Export environment variables
export XDG_DATA_HOME=/container/${USER}/data
export XDG_RUNTIME_DIR=/container/${USER}/runtime
export TMPDIR=/container/${USER}/tmp/
# Create required directories
mkdir -p ${XDG_DATA_HOME}
mkdir -p ${XDG_RUNTIME_DIR}
mkdir -p ${TMPDIR}
# Verify directories and output status
echo "Environment setup completed:"
echo "USER: ${USER}"
echo "XDG_DATA_HOME: ${XDG_DATA_HOME}"
echo "XDG_RUNTIME_DIR: ${XDG_RUNTIME_DIR}"
echo "TMPDIR: ${TMPDIR}"
# Check if directories exist
for dir in "${XDG_DATA_HOME}" "${XDG_RUNTIME_DIR}" "${TMPDIR}"; do
if [ -d "$dir" ]; then
echo "✓ Podman Directory exists: $dir"
else
echo "✗ Podman Directory missing: $dir"
exit 1
fi
done
# Set environment variables for subsequent steps
echo "XDG_DATA_HOME=${XDG_DATA_HOME}" >> $GITHUB_ENV
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV
echo "TMPDIR=${TMPDIR}" >> $GITHUB_ENV
- name: Setup podman cache
id: setup_cache
run: |
# Setup podman cache directory
CACHE_DIR="/container/${USER}/podman-cache"
mkdir -p "$CACHE_DIR"
echo "[CACHE SETUP]:"
echo " Cache directory: $CACHE_DIR"
# Configure podman to use cache
export TMPDIR="${TMPDIR:-/tmp}"
# Check cache size if exists
if [ -d "$CACHE_DIR" ]; then
cache_size=$(du -sh "$CACHE_DIR" 2>/dev/null | cut -f1 || echo "0")
echo " Current cache size: $cache_size"
else
echo " Cache directory created (empty)"
fi
echo "CACHE_DIR=$CACHE_DIR" >> $GITHUB_ENV
- name: Build container with podman
id: build_container
run: |
# Get variables from PREPARE-job
dockerfile_path="${{ needs.PREPARE-job.outputs.dockerfile_path }}"
version="${{ needs.PREPARE-job.outputs.version }}"
# Extract filename without extension for tag
dockerfile_name=$(basename "$dockerfile_path" .dockerfile)
echo "Building container with podman..."
echo "Dockerfile: $dockerfile_path"
echo "Tag: ${dockerfile_name}:${version}"
# Build with layer caching enabled
podman build --format=docker \
--layers \
-f "$dockerfile_path" \
-t "${dockerfile_name}:${version}" \
$(dirname "$dockerfile_path")
# Verify the image was built
if podman images | grep -q "${dockerfile_name}"; then
echo "✓ Image built successfully: ${dockerfile_name}:${version}"
# Show caching information
echo "[LAYER CACHE]: Podman layer caching enabled"
echo "[OPTIMIZATION]: Subsequent builds will reuse unchanged layers"
echo "[NOTE]: APT cache within container is cleaned as per Dockerfile"
# Show all images to see layer reuse
echo "[ALL IMAGES]:"
podman images | head -10
else
echo "✗ Failed to build image: ${dockerfile_name}:${version}"
exit 1
fi
# Set outputs for next step
# Output Variables:
# - image_tag: Complete image tag with version (e.g., "ex1:0.0.5")
# - dockerfile_name: Base name without extension (e.g., "ex1")
echo "image_tag=${dockerfile_name}:${version}" >> $GITHUB_OUTPUT
echo "dockerfile_name=${dockerfile_name}" >> $GITHUB_OUTPUT
- name: Save container to archive
id: save_container
run: |
# Get variables from previous step
image_tag="${{ steps.build_container.outputs.image_tag }}"
dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}"
version="${{ needs.PREPARE-job.outputs.version }}"
# Define output file
output_file="${dockerfile_name}_${version}.tar"
echo "Saving container to Docker archive..."
echo "Image: $image_tag"
echo "Output: $output_file"
# Save with podman using Docker archive format (compatible with Trivy)
podman save --format docker-archive "$image_tag" -o "$output_file"
# Verify the file was created
if [ -f "$output_file" ]; then
file_size=$(ls -lh "$output_file" | awk '{print $5}')
echo "✓ Archive saved successfully: $output_file (Size: $file_size)"
else
echo "✗ Failed to save archive: $output_file"
exit 1
fi
# Set outputs
# Output Variables:
# - archive_file: Archive filename only (e.g., "ex1_0.0.5.tar")
# - archive_path: Full path to archive (e.g., "/home/runner/work/repo/ex1_0.0.5.tar")
echo "archive_file=$output_file" >> $GITHUB_OUTPUT
echo "archive_path=${PWD}/$output_file" >> $GITHUB_OUTPUT
- name: Copy to persistent local storage
id: persistent_storage
run: |
# Get variables
archive_file="${{ steps.save_container.outputs.archive_file }}"
dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}"
version="${{ needs.PREPARE-job.outputs.version }}"
# Define persistent storage directory
PERSISTENT_DIR="/container/${USER}/artifacts"
echo "Setting up persistent storage at: $PERSISTENT_DIR"
# Create directory if it doesn't exist
mkdir -p "$PERSISTENT_DIR"
# Copy archive to persistent storage with timestamp
timestamp=$(date +"%Y%m%d_%H%M%S")
persistent_filename="${dockerfile_name}_${version}_${timestamp}.tar"
persistent_path="$PERSISTENT_DIR/$persistent_filename"
echo "Copying archive to persistent storage..."
echo "Source: $archive_file"
echo "Destination: $persistent_path"
if cp "$archive_file" "$persistent_path"; then
# Verify the copy
if [ -f "$persistent_path" ]; then
file_size=$(ls -lh "$persistent_path" | awk '{print $5}')
echo "✓ Archive successfully copied to persistent storage"
echo " [LOCATION]: $persistent_path"
echo " [SIZE]: $file_size"
echo " [TIMESTAMP]: $timestamp"
# Also create a symlink to latest version (without timestamp)
latest_link="$PERSISTENT_DIR/${dockerfile_name}_latest.tar"
ln -sf "$persistent_filename" "$latest_link"
echo " [LATEST LINK]: $latest_link"
# List all archives for this dockerfile
echo " [ALL VERSIONS]:"
ls -la "$PERSISTENT_DIR/${dockerfile_name}_"*.tar 2>/dev/null || echo " (No previous versions found)"
else
echo "✗ Archive copy verification failed"
exit 1
fi
else
echo "✗ Failed to copy archive to persistent storage"
exit 1
fi
# Set outputs
echo "persistent_path=$persistent_path" >> $GITHUB_OUTPUT
echo "persistent_dir=$PERSISTENT_DIR" >> $GITHUB_OUTPUT
echo "latest_link=$latest_link" >> $GITHUB_OUTPUT
SCAN-AND-REPORT-job:
needs: [BUILD-job, PREPARE-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true' && needs.PREPARE-job.outputs.noscan != 'true'
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
REPORT_DIR: ./trivy-reports
steps:
- name: Print host
run: |
echo "Hostname: $(hostname)"
- name: Locate image archive (from persistent storage)
id: locate
shell: bash
run: |
set -euo pipefail
shopt -s nullglob
PERSISTENT_DIR="/container/${USER}/artifacts"
echo "Searching archives in: ${PERSISTENT_DIR}"
# 精准匹配 version,有则用;否则回退 latest
matches=("${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_"*.tar)
if (( ${#matches[@]} == 0 )); then
matches=("${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar")
fi
if (( ${#matches[@]} == 0 )) || [[ ! -e "${matches[0]}" ]]; then
echo "✗ Archive not found."
echo " Tried: ${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_*.tar"
echo " Fallback: ${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar"
ls -la "${PERSISTENT_DIR}" || true
exit 1
fi
archive_file="$(ls -t "${matches[@]}" | head -1)"
archive_name="$(basename "${archive_file}")"
cp -f "${archive_file}" "./${archive_name}"
size="$(ls -lh "./${archive_name}" | awk '{print $5}')"
echo "✓ Using archive: ${archive_file} (size: ${size})"
echo "archive_path=${archive_file}" >> "$GITHUB_OUTPUT"
echo "archive_name=${archive_name}" >> "$GITHUB_OUTPUT"
- name: Prepare report directory
run: |
mkdir -p "${REPORT_DIR}"
echo "Report directory created: ${REPORT_DIR}"
# Generate JSON report first
- name: Trivy (JSON scan for processing)
uses: aquasecurity/trivy-action@master
with:
scan-type: image
input: ./${{ steps.locate.outputs.archive_name }}
format: json
output: ${{ env.REPORT_DIR }}/scan-results.json
severity: CRITICAL,HIGH,MEDIUM,LOW,UNKNOWN
ignore-unfixed: true
vuln-type: os,library
exit-code: '0'
hide-progress: true
cache: true
env:
TRIVY_SKIP_DB_UPDATE: false
TRIVY_SKIP_JAVA_DB_UPDATE: false
# Process JSON to create readable summary
- name: Generate readable summary from JSON
run: |
python3 -c "
import json
import sys
import os
json_file = '${REPORT_DIR}/scan-results.json'
output_file = '${REPORT_DIR}/summary.md'
try:
with open(json_file, 'r') as f:
data = json.load(f)
# Initialize counters
counts = {'CRITICAL': 0, 'HIGH': 0, 'MEDIUM': 0, 'LOW': 0, 'UNKNOWN': 0}
vulnerabilities = []
# Process results
if 'Results' in data and data['Results']:
for result in data['Results']:
if 'Vulnerabilities' in result and result['Vulnerabilities']:
for vuln in result['Vulnerabilities']:
severity = vuln.get('Severity', 'UNKNOWN')
if severity in counts:
counts[severity] += 1
vulnerabilities.append(vuln)
# Generate summary
with open(output_file, 'w') as f:
artifact_name = data.get('ArtifactName', 'Unknown')
f.write(f'## Trivy Scan Summary — {artifact_name}\n\n')
f.write(f'**Target:** {artifact_name}\n')
f.write(f'**Total Vulnerabilities:** {sum(counts.values())}\n\n')
f.write('### Severity Counts\n')
for severity, count in counts.items():
if count > 0:
f.write(f'- **{severity}:** {count}\n')
else:
f.write(f'- {severity}: {count}\n')
f.write('\n')
# Show critical and high details
critical_high = [v for v in vulnerabilities if v.get('Severity') in ['CRITICAL', 'HIGH']]
if critical_high:
f.write('### Critical & High Severity Vulnerabilities\n\n')
f.write('| Severity | CVE ID | Package | Installed | Fixed |\n')
f.write('|----------|--------|---------|-----------|-------|\n')
for vuln in critical_high[:15]: # Limit to first 15
severity = vuln.get('Severity', 'N/A')
cve_id = vuln.get('VulnerabilityID', 'N/A')
pkg = vuln.get('PkgName', 'N/A')
installed = vuln.get('InstalledVersion', 'N/A')
fixed = vuln.get('FixedVersion', 'N/A') or '❌'
f.write(f'| {severity} | {cve_id} | {pkg} | {installed} | {fixed} |\n')
if len(critical_high) > 15:
f.write(f'\n*... and {len(critical_high) - 15} more critical/high vulnerabilities.*\n')
elif sum(counts.values()) > 0:
f.write('### Good News! 🎉\n\n')
f.write('No CRITICAL or HIGH severity vulnerabilities found.\n')
if counts['MEDIUM'] > 0:
f.write(f'Only {counts[\"MEDIUM\"]} MEDIUM severity issues detected.\n')
else:
f.write('### Excellent! ✅\n\n')
f.write('No vulnerabilities found in this image.\n')
print(f'Successfully generated summary: {output_file}')
except Exception as e:
with open(output_file, 'w') as f:
f.write(f'### ❌ Error Processing Scan Results\n\n')
f.write(f'**Error:** {str(e)}\n\n')
f.write(f'**JSON file:** {json_file}\n')
f.write(f'**File exists:** {os.path.exists(json_file)}\n')
print(f'Error: {e}', file=sys.stderr)
"
- name: Debug Trivy output
run: |
echo "=== Trivy output file exists? ==="
ls -la "${REPORT_DIR}/summary.md" || echo "Summary file not found"
echo ""
echo "=== Trivy output content ==="
cat "${REPORT_DIR}/summary.md" || echo "Cannot read summary file"
echo ""
echo "=== Report directory contents ==="
ls -la "${REPORT_DIR}/" || echo "Report directory not found"
- name: Append to GitHub Actions summary
run: |
{
echo "## Trivy Scan Report for \`${{ env.DOCKERFILE_NAME }}\`"
echo ""
echo "- **Scan target**: \`${{ steps.locate.outputs.archive_name }}\`"
echo "- **Archive source**: \`${{ steps.locate.outputs.archive_path }}\`"
echo "- **Version**: \`${{ env.VERSION }}\`"
echo ""
# Check if summary file exists and has content
if [ -f "${REPORT_DIR}/summary.md" ] && [ -s "${REPORT_DIR}/summary.md" ]; then
echo "### Scan Results"
cat "${REPORT_DIR}/summary.md"
else
echo "### ⚠️ Scan Results"
echo ""
echo "**Status:** Summary file not generated or empty"
echo ""
echo "**Possible causes:**"
echo "- Trivy template processing failed"
echo "- Archive file format issue"
echo "- Trivy action configuration problem"
echo ""
echo "**Debug info:**"
echo "- Template file: \`.github/trivy-summary.tpl\`"
echo "- Expected output: \`${REPORT_DIR}/summary.md\`"
echo ""
echo "Check the debug steps above for more details."
fi
} >> "$GITHUB_STEP_SUMMARY"
# Generate SARIF report for GitHub Security
- name: Trivy (SARIF for GitHub Security)
uses: aquasecurity/trivy-action@master
if: always()
with:
scan-type: image
input: ./${{ steps.locate.outputs.archive_name }}
format: sarif
output: ${{ env.REPORT_DIR }}/trivy-results.sarif
severity: CRITICAL,HIGH,MEDIUM
ignore-unfixed: true
vuln-type: os,library
exit-code: '0'
hide-progress: true
cache: true
env:
TRIVY_SKIP_DB_UPDATE: true
TRIVY_SKIP_JAVA_DB_UPDATE: true
- name: Upload SARIF to Security tab
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
sarif_file: ${{ env.REPORT_DIR }}/trivy-results.sarif
- name: Upload Trivy artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: trivy-reports-${{ env.DOCKERFILE_NAME }}-${{ env.VERSION }}
path: ${{ env.REPORT_DIR }}/
retention-days: 30
PUSH-PRIV-job:
needs: [BUILD-job, PREPARE-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
BUCKET: cicd-container-image
PERSISTENT_DIR: /container/${{ github.actor }}/artifacts
steps:
- name: Print hostname
run: |
echo "Hostname: $(hostname)"
- name: Locate and copy archive from persistent storage
id: locate_archive
run: |
# Find the archive file (version-specific or latest)
PERSISTENT_DIR="/container/${USER}/artifacts"
echo "Searching for archive in: $PERSISTENT_DIR"
# Find exact version match first, then fallback to latest
archive_file=""
if ls "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_"*.tar 1> /dev/null 2>&1; then
archive_file=$(ls -t "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_"*.tar | head -1)
elif [ -f "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar" ]; then
archive_file="${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar"
fi
if [ -z "$archive_file" ]; then
echo "✗ Archive not found in persistent storage"
echo " Searched for: ${DOCKERFILE_NAME}_${VERSION}_*.tar"
echo " Also searched for: ${DOCKERFILE_NAME}_latest.tar"
ls -la "$PERSISTENT_DIR" || echo " Directory not accessible"
exit 1
fi
# Set destination filename (without timestamp)
destination_filename="${DOCKERFILE_NAME}_${VERSION}.tar"
# Copy to workspace
cp "$archive_file" "./$destination_filename"
if [ -f "./$destination_filename" ]; then
file_size=$(ls -lh "./$destination_filename" | awk '{print $5}')
echo "✓ Archive copied successfully"
echo " [SOURCE]: $archive_file"
echo " [DESTINATION]: ${PWD}/$destination_filename"
echo " [SIZE]: $file_size"
else
echo "✗ Failed to copy archive"
exit 1
fi
echo "archive_source=$archive_file" >> $GITHUB_OUTPUT
echo "archive_file=$destination_filename" >> $GITHUB_OUTPUT
echo "archive_path=${PWD}/$destination_filename" >> $GITHUB_OUTPUT
- name: Setup rclone
uses: ./.github/actions/setup-rclone
with:
access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
endpoint: https://projects.pawsey.org.au
bucket: ${{ env.BUCKET }}
destination_path: ${{ steps.locate_archive.outputs.archive_file }}
- name: Upload container archive to S3 with rclone
run: |
set -euo pipefail
archive_file="${{ steps.locate_archive.outputs.archive_file }}"
bucket="${{ env.BUCKET }}"
echo "=== S3 Upload Configuration ==="
echo "Archive file: $archive_file"
echo "Bucket: $bucket"
echo "Destination: $archive_file"
echo ""
# Calculate file size for optimization
FILE_SIZE=$(wc -c < "$archive_file")
echo "File size: $FILE_SIZE bytes"
# Set rclone parameters based on file size
if [ "$FILE_SIZE" -lt $((1024 * 1024 * 500)) ]; then
# < 500MB
S3_CHUNK_SIZE="16M"
S3_UPLOAD_CONCURRENCY=4
MULTI_THREAD_STREAMS=2
elif [ "$FILE_SIZE" -lt $((1024 * 1024 * 5000)) ]; then
# 500MB - 5GB
S3_CHUNK_SIZE="64M"
S3_UPLOAD_CONCURRENCY=8
MULTI_THREAD_STREAMS=4
else
# > 5GB
S3_CHUNK_SIZE="128M"
S3_UPLOAD_CONCURRENCY=16
MULTI_THREAD_STREAMS=8
fi
echo "Optimized settings for file size:"
echo " S3 chunk size: $S3_CHUNK_SIZE"
echo " Upload concurrency: $S3_UPLOAD_CONCURRENCY"
echo " Multi-thread streams: $MULTI_THREAD_STREAMS"
echo ""
# Execute rclone upload
echo "Starting upload..."
./rclone copy "$archive_file" pawsey0001:"$bucket/" \
--multi-thread-streams=$MULTI_THREAD_STREAMS \
--s3-chunk-size=$S3_CHUNK_SIZE \
--s3-upload-concurrency=$S3_UPLOAD_CONCURRENCY \
--progress
echo "✓ Upload completed successfully"
echo " [BUCKET]: $bucket"
echo " [DESTINATION]: $archive_file"
echo " [SIZE]: $(ls -lh "$archive_file" | awk '{print $5}')"
- name: "[Setonix Registry] Login and Push"
if: needs.PREPARE-job.outputs.setonixreg_available == 'true'
continue-on-error: true
run: |
set +e # Don't exit on errors to allow independent execution
echo "=========================================="
echo " Setonix Registry Push Process"
echo "=========================================="
# Load image from archive first
echo "📦 Loading image from archive..."
archive_file="${{ steps.locate_archive.outputs.archive_file }}"
if podman load -i "$archive_file"; then
echo "✓ Image loaded successfully from archive"
else
echo "✗ Failed to load image from archive"
exit 1
fi
# Login to Setonix Registry
echo "🔐 Logging into Setonix Registry..."
if podman login https://setonix-registry.pawsey.org.au -u "${{ vars.SETONIXREG_USERNAME }}" -p "${{ secrets.SETONIXREG_PASS }}"; then
echo "✓ Setonix Registry login successful"
else
echo "✗ Setonix Registry login failed"
exit 1
fi
# Tag and push
image_tag="${DOCKERFILE_NAME}:${VERSION}"
setonix_tag="setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
echo ""
echo "🏷️ Tagging image for Setonix Registry:"
echo " Source: $image_tag"
echo " Target: $setonix_tag"
if podman tag "$image_tag" "$setonix_tag"; then
echo "✓ Image tagged successfully"
else
echo "✗ Image tagging failed"
exit 1
fi
echo ""
echo "📤 Pushing to Setonix Registry..."
if podman push "$setonix_tag"; then
echo "✅ Successfully pushed to Setonix Registry: $setonix_tag"
else
echo "❌ Failed to push to Setonix Registry"
exit 1
fi
# Cleanup local tagged image
echo ""
echo "🧹 Cleaning up local Setonix tagged image..."
podman rmi "$setonix_tag" 2>/dev/null || echo " (Setonix tag already removed or not found)"
podman rmi "$image_tag" 2>/dev/null || echo " (Base image already removed or not found)"
PUSH-PUBLIC-job:
needs: [BUILD-job, PREPARE-job, PUSH-PRIV-job]
runs-on: setonix-podman02
if: needs.PREPARE-job.outputs.proceed_valid == 'true'
environment:
name: manual_approval
env:
DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
VERSION: ${{ needs.PREPARE-job.outputs.version }}
PERSISTENT_DIR: /container/${{ github.actor }}/artifacts
steps:
- name: Print hostname and check registry credentials
run: |
echo "Hostname: $(hostname)"
echo ""
echo "=== Public Registry Push Status ==="
# Check Docker Hub credentials
if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
echo "[✓] Docker Hub: Credentials available - will push to ${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
else
echo "[skip] Docker Hub: Skipping (missing DOCKERHUB_USERNAME or DOCKERHUB_TOKEN)"
fi
# Check Quay.io credentials
if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
echo "[✓] Quay.io: Credentials available - will push to quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
else
echo "[skip] Quay.io: Skipping (missing QUAYIO_USERNAME or QUAYIO_TOKEN)"
fi
# Check Setonix Registry credentials
if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then
echo "[✓] Setonix Registry: Credentials available - already pushed to setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${DOCKERFILE_NAME}:${VERSION} in PUSH-PRIV-job"
else
echo "[skip] Setonix Registry: Skipping (missing SETONIXREG_USERNAME or SETONIXREG_PASS)"
fi
# Check if any registry is available
if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" != "true" ] && [ "${{ needs.PREPARE-job.outputs.quayio_available }}" != "true" ]; then
echo ""
echo "[WARNING] No public registry credentials found. This job will complete without pushing to any public registry."
echo " To enable public registry pushes, configure the required variables and secrets:"
echo " - Docker Hub: DOCKERHUB_USERNAME (variable) + DOCKERHUB_TOKEN (secret)"
echo " - Quay.io: QUAYIO_USERNAME (variable) + QUAYIO_TOKEN (secret)"
echo " - Setonix Registry: SETONIXREG_USERNAME (variable) + SETONIXREG_PASS (secret)"
fi
echo ""
- name: Locate and load image from persistent storage
id: locate_and_load
if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true'
run: |
# Find the archive file (version-specific or latest)
PERSISTENT_DIR="/container/${USER}/artifacts"
echo "Searching for archive in: $PERSISTENT_DIR"
# Find exact version match first, then fallback to latest
archive_file=""
if ls "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_"*.tar 1> /dev/null 2>&1; then
archive_file=$(ls -t "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_${VERSION}_"*.tar | head -1)
elif [ -f "${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar" ]; then
archive_file="${PERSISTENT_DIR}/${DOCKERFILE_NAME}_latest.tar"
fi
if [ -z "$archive_file" ]; then
echo "✗ Archive not found in persistent storage"
echo " Searched for: ${DOCKERFILE_NAME}_${VERSION}_*.tar"
echo " Also searched for: ${DOCKERFILE_NAME}_latest.tar"
ls -la "$PERSISTENT_DIR" || echo " Directory not accessible"
exit 1
fi
# Load image into podman
echo "✓ Loading image from archive: $archive_file"
podman load -i "$archive_file"
# Verify image was loaded
if podman images | grep -q "${DOCKERFILE_NAME}"; then
echo "✓ Image loaded successfully: ${DOCKERFILE_NAME}:${VERSION}"
podman images | grep "${DOCKERFILE_NAME}"
else
echo "✗ Failed to load image: ${DOCKERFILE_NAME}:${VERSION}"
exit 1
fi
echo "archive_source=$archive_file" >> $GITHUB_OUTPUT
echo "image_tag=${DOCKERFILE_NAME}:${VERSION}" >> $GITHUB_OUTPUT
- name: Skip message when no credentials available
  if: needs.PREPARE-job.outputs.dockerhub_available != 'true' && needs.PREPARE-job.outputs.quayio_available != 'true'
  run: |
    # Nothing to push: neither Docker Hub nor Quay.io credentials are configured.
    # Emit the informational banner in one block instead of per-line echos.
    cat <<'SKIP_MSG'

    ==========================================
     Public Registry Push: SKIPPED
    ==========================================

    No public registry credentials were found.
    The workflow will complete successfully without pushing to public registries.

    This is normal behavior when:
    • You only want to use private storage (S3)
    • Credentials are not yet configured
    • Testing the build pipeline

    To enable public registry pushes in the future, configure:
    • Docker Hub: Set DOCKERHUB_USERNAME (repo variable) and DOCKERHUB_TOKEN (repo secret)
    • Quay.io: Set QUAYIO_USERNAME (repo variable) and QUAYIO_TOKEN (repo secret)

    [✓] Job completed successfully (no action required)

    SKIP_MSG
- name: Prepare Docker config directory
  if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true'
  run: |
    # Ensure the Docker client config directory exists before any registry login.
    config_dir="$HOME/.docker"
    mkdir -p "$config_dir"
    echo "Created Docker config directory: ~/.docker"
# ============================================
# Docker Hub Push (Independent Operation)
# ============================================
- name: "[Docker Hub] Login and Push"
  if: needs.PREPARE-job.outputs.dockerhub_available == 'true'
  continue-on-error: true
  env:
    # Pass credentials through the environment instead of interpolating
    # ${{ }} expressions directly into the script body. This avoids the
    # GitHub-documented script-injection pattern and keeps the token out
    # of the rendered shell source.
    DH_USER: ${{ vars.DOCKERHUB_USERNAME }}
    DH_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
  run: |
    set +e  # Don't exit on errors to allow independent execution
    echo "=========================================="
    echo " Docker Hub Push Process"
    echo "=========================================="
    # Login to Docker Hub. The token is fed via stdin (--password-stdin)
    # so it never appears in the process argument list.
    echo "🔐 Logging into Docker Hub..."
    if printf '%s' "$DH_TOKEN" | podman login docker.io -u "$DH_USER" --password-stdin; then
      echo "✓ Docker Hub login successful"
    else
      echo "✗ Docker Hub login failed"
      exit 1
    fi
    # Tag the loaded image for Docker Hub and push it.
    # DOCKERFILE_NAME / VERSION are expected from the job environment
    # (set by earlier steps) — TODO confirm against the full workflow.
    image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    dockerhub_tag="${DH_USER}/${DOCKERFILE_NAME}:${VERSION}"
    echo ""
    echo "🏷️ Tagging image for Docker Hub:"
    echo " Source: $image_tag"
    echo " Target: $dockerhub_tag"
    if podman tag "$image_tag" "$dockerhub_tag"; then
      echo "✓ Image tagged successfully"
    else
      echo "✗ Image tagging failed"
      exit 1
    fi
    echo ""
    echo "📤 Pushing to Docker Hub..."
    if podman push "$dockerhub_tag"; then
      echo "✅ Successfully pushed to Docker Hub: $dockerhub_tag"
    else
      echo "❌ Failed to push to Docker Hub"
      exit 1
    fi
# ============================================
# Quay.io Push (Independent Operation)
# ============================================
- name: "[Quay.io] Login and Push"
  if: needs.PREPARE-job.outputs.quayio_available == 'true'
  continue-on-error: true
  env:
    # Pass credentials through the environment instead of interpolating
    # ${{ }} expressions directly into the script body. This avoids the
    # GitHub-documented script-injection pattern and keeps the token out
    # of the rendered shell source.
    QUAY_USER: ${{ vars.QUAYIO_USERNAME }}
    QUAY_TOKEN: ${{ secrets.QUAYIO_TOKEN }}
  run: |
    set +e  # Don't exit on errors to allow independent execution
    echo "=========================================="
    echo " Quay.io Push Process"
    echo "=========================================="
    # Login to Quay.io. The token is fed via stdin (--password-stdin)
    # so it never appears in the process argument list.
    echo "🔐 Logging into Quay.io..."
    if printf '%s' "$QUAY_TOKEN" | podman login quay.io -u "$QUAY_USER" --password-stdin; then
      echo "✓ Quay.io login successful"
    else
      echo "✗ Quay.io login failed"
      exit 1
    fi
    # Tag the loaded image for Quay.io and push it.
    # DOCKERFILE_NAME / VERSION are expected from the job environment
    # (set by earlier steps) — TODO confirm against the full workflow.
    image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    quayio_tag="quay.io/${QUAY_USER}/${DOCKERFILE_NAME}:${VERSION}"
    echo ""
    echo "🏷️ Tagging image for Quay.io:"
    echo " Source: $image_tag"
    echo " Target: $quayio_tag"
    if podman tag "$image_tag" "$quayio_tag"; then
      echo "✓ Image tagged successfully"
    else
      echo "✗ Image tagging failed"
      exit 1
    fi
    echo ""
    echo "📤 Pushing to Quay.io..."
    if podman push "$quayio_tag"; then
      echo "✅ Successfully pushed to Quay.io: $quayio_tag"
    else
      echo "❌ Failed to push to Quay.io"
      exit 1
    fi
- name: Cleanup local images
  if: always() && (needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true')
  run: |
    # Best-effort removal of the local image and any registry-specific tags
    # created by the push steps above; failures are reported, not fatal.
    echo "🧹 Cleaning up local images..."
    base_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    # Remove original image
    echo "Removing base image: $base_tag"
    podman rmi "$base_tag" 2>/dev/null || echo " (Base image already removed or not found)"
    # Remove Docker Hub tagged image if it was created
    case "${{ needs.PREPARE-job.outputs.dockerhub_available }}" in
      true)
        dh_tag="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
        echo "Removing Docker Hub tag: $dh_tag"
        podman rmi "$dh_tag" 2>/dev/null || echo " (Docker Hub tag already removed or not found)"
        ;;
    esac
    # Remove Quay.io tagged image if it was created
    case "${{ needs.PREPARE-job.outputs.quayio_available }}" in
      true)
        qi_tag="quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
        echo "Removing Quay.io tag: $qi_tag"
        podman rmi "$qi_tag" 2>/dev/null || echo " (Quay.io tag already removed or not found)"
        ;;
    esac
    echo "✅ Local image cleanup completed"
# CLEANUP-job:
# needs: [APPROVE-PUSH-PUB-job,PUSH-PRIV-job, SCAN-AND-REPORT-job, BUILD-job, PREPARE-job]
# if: always()
# runs-on: ${{ needs.PREPARE-job.outputs.runner_label }}
# steps:
# - name: Clean-up
# run: |
# sudo rm -rf $HOME/runner/artifacts/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}
# DEPLOY-job:
# needs: [PUSH-PRIV-job,PREPARE-job]
# runs-on: Ella
# if: needs.PREPARE-job.outputs.platform_tag == 'arm'
# env:
# BUCKET: ${{ vars.ACACIA_BUCKETNAME }} # BYO or pawsey0001-image-compilation if compile for project
# DESTINATION_PATH: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}/${{ needs.PREPARE-job.outputs.date }}
# #environment:
# # name: manual_approval
# steps:
# - name: Checkout repository
# uses: actions/checkout@v4
# with:
# fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed
# - name: Setup rclone
# uses: ./.github/actions/setup-rclone
# with:
# access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
# secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
# endpoint: https://projects.pawsey.org.au
# bucket: ${{ env.BUCKET }}
# destination_path: ${{ env.DESTINATION_PATH }}
# - name: Deploy ARM image to Ella
# run: |
# echo "Deploying ARM image to Ella"
# echo "Hostname: $(hostname)"
#         echo "Deploying image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }} to Ella"
# mkdir -p $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/
# rclone copy pawsey0001:"${{ env.BUCKET }}/${{ env.DESTINATION_PATH }}/image.tar" $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/
# - name: Convert to Singularity File
# run: |
# echo "Converting to Singularity File"
#         echo "Converting image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }} to Singularity"
# source ~/.bashrc
# singularity build --force $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}.sif docker-archive://$MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/image.tar