# cicd: experiment #404
# NOTE: the two lines below were GitHub web-UI boilerplate captured by a
# copy-paste ("hidden or bidirectional Unicode" warning banner); they are
# preserved here as comments only so the file parses as YAML.
# "This file contains hidden or bidirectional Unicode text that may be
# interpreted or compiled differently than what appears below."
name: ci
# Version 1.0.0
# This workflow is triggered on push events to the repository
# Please find readme.md for the usage of this workflow
on:
  push:
    branches:
      - 'cicd*'
jobs:
  # PREPARE-job: validates the pushed Dockerfile change and exposes the
  # build parameters (path, version, flags, credential availability) to
  # the downstream jobs.
  PREPARE-job:
    runs-on: ubuntu-latest
    outputs:
      # Job output variables:
      # - proceed_valid: whether to proceed with build ("true"/"false")
      # - dockerfile_path: path to the modified Dockerfile (e.g. "x86/ex1.dockerfile")
      # - version: semver version from the Dockerfile (e.g. "0.0.5")
      # - devmode: development mode flag ("true"/"false")
      # - noscan: skip-security-scan flag ("true"/"false")
      # - files: list of changed files, for debugging
      # - date: MM-DD tag generated at the end of the job
      # - dockerhub_available / quayio_available / setonixreg_available:
      #   whether the corresponding registry credentials are configured
      proceed_valid: ${{ steps.set_proceed_flag.outputs.proceed_valid }}
      dockerfile_path: ${{ steps.set_proceed_flag.outputs.dockerfile_path }}
      version: ${{ steps.set_proceed_flag.outputs.version }}
      devmode: ${{ steps.set_proceed_flag.outputs.devmode }}
      noscan: ${{ steps.set_proceed_flag.outputs.noscan }}
      files: ${{ steps.changed_files.outputs.files }}
      date: ${{ steps.date.outputs.date }}
      dockerhub_available: ${{ steps.check_vars_secrets.outputs.dockerhub_available }}
      quayio_available: ${{ steps.check_vars_secrets.outputs.quayio_available }}
      setonixreg_available: ${{ steps.check_vars_secrets.outputs.setonixreg_available }}
    steps:
      - name: Initialize runner configuration
        id: set_default_runner_label
        # NOTE(review): this output is not consumed anywhere in the visible
        # workflow — confirm before removing.
        run: |
          echo "runner_label=ubuntu-latest" >> $GITHUB_OUTPUT
      - name: Validate environment credentials
        id: check_vars_secrets
        run: |
          missing_vars=()
          missing_secrets=()
          # Check repository Variables
          if [ -z "${{ vars.DOCKERHUB_USERNAME }}" ]; then
            missing_vars+=("DOCKERHUB_USERNAME")
          fi
          if [ -z "${{ vars.QUAYIO_USERNAME }}" ]; then
            missing_vars+=("QUAYIO_USERNAME")
          fi
          if [ -z "${{ vars.SETONIXREG_USERNAME }}" ]; then
            missing_vars+=("SETONIXREG_USERNAME")
          fi
          if [ -z "${{ vars.ACACIA_BUCKETNAME }}" ]; then
            missing_vars+=("ACACIA_BUCKETNAME")
          fi
          # Check repository Secrets
          if [ -z "${{ secrets.PAT_TOKEN }}" ]; then
            missing_secrets+=("PAT_TOKEN")
          fi
          if [ -z "${{ secrets.DOCKERHUB_TOKEN }}" ]; then
            missing_secrets+=("DOCKERHUB_TOKEN")
          fi
          if [ -z "${{ secrets.QUAYIO_TOKEN }}" ]; then
            missing_secrets+=("QUAYIO_TOKEN")
          fi
          if [ -z "${{ secrets.SETONIXREG_PASS }}" ]; then
            missing_secrets+=("SETONIXREG_PASS")
          fi
          if [ -z "${{ secrets.ACACIA_ACCESS_KEY_ID }}" ]; then
            missing_secrets+=("ACACIA_ACCESS_KEY_ID")
          fi
          if [ -z "${{ secrets.ACACIA_SECRET_ACCESS_KEY }}" ]; then
            missing_secrets+=("ACACIA_SECRET_ACCESS_KEY")
          fi
          # Log status of variables and secrets ([*] joins elements inside
          # double quotes; [@] inside quotes expands to multiple words)
          if [ ${#missing_vars[@]} -ne 0 ]; then
            echo "Missing Variables: ${missing_vars[*]}"
          else
            echo "All required variables are set."
          fi
          if [ ${#missing_secrets[@]} -ne 0 ]; then
            echo "Missing Secrets: ${missing_secrets[*]}"
          else
            echo "All required secrets are set."
          fi
          # Set output flags for conditional job execution
          dockerhub_available=$( [ -n "${{ vars.DOCKERHUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKERHUB_TOKEN }}" ] && echo 'true' || echo 'false' )
          quayio_available=$( [ -n "${{ vars.QUAYIO_USERNAME }}" ] && [ -n "${{ secrets.QUAYIO_TOKEN }}" ] && echo 'true' || echo 'false' )
          setonixreg_available=$( [ -n "${{ vars.SETONIXREG_USERNAME }}" ] && [ -n "${{ secrets.SETONIXREG_PASS }}" ] && echo 'true' || echo 'false' )
          echo "dockerhub_available=$dockerhub_available" >> $GITHUB_OUTPUT
          echo "quayio_available=$quayio_available" >> $GITHUB_OUTPUT
          echo "setonixreg_available=$setonixreg_available" >> $GITHUB_OUTPUT
          # Log registry availability status
          echo ""
          echo "=== Registry Credentials Check ==="
          if [ "$dockerhub_available" = "true" ]; then
            echo "[✓] Docker Hub: Credentials available"
          else
            echo "[✗] Docker Hub: Missing credentials (DOCKERHUB_USERNAME or DOCKERHUB_TOKEN)"
          fi
          if [ "$quayio_available" = "true" ]; then
            echo "[✓] Quay.io: Credentials available"
          else
            echo "[✗] Quay.io: Missing credentials (QUAYIO_USERNAME or QUAYIO_TOKEN)"
          fi
          if [ "$setonixreg_available" = "true" ]; then
            echo "[✓] Setonix Registry: Credentials available"
          else
            echo "[✗] Setonix Registry: Missing credentials (SETONIXREG_USERNAME or SETONIXREG_PASS)"
          fi
          echo "==================================="
      - name: Checkout source code
        uses: actions/checkout@v4
        with:
          fetch-depth: 2  # Ensure the parent commit is available for git diff
      - name: Validate git history access
        id: git_history
        run: |
          before_sha="${{ github.event.before }}"
          # On the first push of a new branch, github.event.before is the
          # all-zero SHA, which cannot be resolved or diffed against. Fall
          # back to the parent of HEAD (available because fetch-depth is 2).
          if [ -z "$before_sha" ] || echo "$before_sha" | grep -qE '^0+$'; then
            if git rev-parse --verify HEAD^ >/dev/null 2>&1; then
              before_sha=$(git rev-parse HEAD^)
              echo "github.event.before is unusable; falling back to HEAD^ ($before_sha)"
            else
              echo "Error: no usable base commit for git diff (new branch with a single commit?)."
              exit 1
            fi
          fi
          # Verify that we can access the base commit
          if ! git rev-parse --verify "$before_sha" >/dev/null 2>&1; then
            echo "Error: Cannot access base commit $before_sha"
            echo "This indicates that fetch-depth: 2 is not sufficient or the history is not available."
            exit 1
          fi
          echo "Git history verification passed. Base commit $before_sha is accessible."
          echo "base_sha=$before_sha" >> $GITHUB_OUTPUT
      - name: Detect modified Dockerfiles
        id: changed_files
        run: |
          # Get all Dockerfile variants (case-insensitive); base_sha was
          # validated (and zero-SHA-corrected) by the previous step
          files=$(git diff --name-only ${{ steps.git_history.outputs.base_sha }} ${{ github.sha }} | grep -i '\.dockerfile$' || true)
          if [ -z "$files" ]; then
            echo "No Dockerfile changes detected. This workflow only processes Dockerfile modifications."
            echo "files=" >> $GITHUB_OUTPUT
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          # Count the number of changed files
          file_count=$(echo "$files" | wc -l)
          if [ "$file_count" -gt 1 ]; then
            echo "Multiple Dockerfiles changed ($file_count files). This workflow only processes single file changes."
            echo "files<<EOF" >> $GITHUB_OUTPUT
            echo "$files" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          echo "Single Dockerfile changed: $files"
          echo "files<<EOF" >> $GITHUB_OUTPUT
          echo "$files" >> $GITHUB_OUTPUT
          echo "EOF" >> $GITHUB_OUTPUT
          echo "proceed_valid=true" >> $GITHUB_OUTPUT
      - name: Debug changed files output
        run: |
          echo "Files from output: ${{ steps.changed_files.outputs.files }}"
      - name: Validate single file modification
        id: validate_changes
        if: steps.changed_files.outputs.proceed_valid == 'true'
        run: |
          changed_files="${{ steps.changed_files.outputs.files }}"
          echo "Processing single file: $changed_files"
          echo "valid=true" >> $GITHUB_OUTPUT
          echo "dockerfile_path=$changed_files" >> $GITHUB_OUTPUT
      - name: Extract and validate version
        id: validate_version
        if: steps.validate_changes.outputs.valid == 'true'
        run: |
          file="${{ steps.validate_changes.outputs.dockerfile_path }}"
          # The version must appear as: LABEL org.opencontainers.image.version="X.Y.Z"
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file"; then
            version=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=' "$file" | sed -E 's/^[^#]*LABEL\s+org\.opencontainers\.image\.version\s*=\s*"?([^"]*)"?.*/\1/')
            # Strict semver check: MAJOR.MINOR.PATCH, digits only
            if echo "$version" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
              echo "Version: $version ✓"
              echo "valid=true" >> $GITHUB_OUTPUT
              echo "version=$version" >> $GITHUB_OUTPUT
            else
              echo "Version: $version ✗ (invalid semver)"
              echo "valid=false" >> $GITHUB_OUTPUT
              echo "message=Invalid version: $version" >> $GITHUB_OUTPUT
            fi
          else
            echo "Version: missing ✗"
            echo "valid=false" >> $GITHUB_OUTPUT
            echo "message=Missing version label" >> $GITHUB_OUTPUT
          fi
      - name: Parse build configuration flags
        id: parse_settings
        if: steps.validate_changes.outputs.valid == 'true'
        run: |
          file="${{ steps.validate_changes.outputs.dockerfile_path }}"
          # Dev mode
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.devmode\s*=\s*true' "$file"; then
            echo "DevMode: enabled"
            echo "devmode=true" >> $GITHUB_OUTPUT
          else
            echo "DevMode: disabled"
            echo "devmode=false" >> $GITHUB_OUTPUT
          fi
          # Scan settings: noscan=true is only honoured when a
          # noscanreason label is also present; otherwise the scan runs.
          if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscan\s*=\s*true' "$file"; then
            if grep -q -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file"; then
              reason=$(grep -E '^[^#]*LABEL\s+org\.opencontainers\.image\.noscanreason\s*' "$file")
              echo "Scan: disabled ($reason)"
              echo "noscan=true" >> $GITHUB_OUTPUT
              echo "noscanreason=$reason" >> $GITHUB_OUTPUT
            else
              echo "Scan: disabled but no reason provided"
              echo "noscan=false" >> $GITHUB_OUTPUT
            fi
          else
            echo "Scan: enabled"
            echo "noscan=false" >> $GITHUB_OUTPUT
          fi
      - name: Determine workflow execution status
        id: set_proceed_flag
        run: |
          files_proceed="${{ steps.changed_files.outputs.proceed_valid }}"
          changes_valid="${{ steps.validate_changes.outputs.valid }}"
          version_valid="${{ steps.validate_version.outputs.valid }}"
          # If files check failed, exit early
          if [ "$files_proceed" != "true" ]; then
            echo "(FAILED) File change validation failed"
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
            echo "devmode=false" >> $GITHUB_OUTPUT
            echo "noscan=false" >> $GITHUB_OUTPUT
            exit 0
          fi
          # Check version validation
          if [ "$version_valid" = "true" ]; then
            echo "(PASSED) All validations passed"
            echo "proceed_valid=true" >> $GITHUB_OUTPUT
          else
            echo "(FAILED) Version validation: ${{ steps.validate_version.outputs.message }}"
            echo "proceed_valid=false" >> $GITHUB_OUTPUT
          fi
          # Pass through outputs (empty when the source steps were skipped)
          echo "dockerfile_path=${{ steps.validate_changes.outputs.dockerfile_path }}" >> $GITHUB_OUTPUT
          echo "version=${{ steps.validate_version.outputs.version }}" >> $GITHUB_OUTPUT
          echo "devmode=${{ steps.parse_settings.outputs.devmode || 'false' }}" >> $GITHUB_OUTPUT
          echo "noscan=${{ steps.parse_settings.outputs.noscan || 'false' }}" >> $GITHUB_OUTPUT
      - name: Generate date tag
        if: steps.set_proceed_flag.outputs.proceed_valid == 'true'
        id: date
        run: |
          date_tag=$(date +'%m-%d')
          echo "Date tag: $date_tag"
          echo "date=$date_tag" >> $GITHUB_OUTPUT
| BUILD-job: | |
| needs: PREPARE-job | |
| runs-on: setonix-podman02 | |
| if: needs.PREPARE-job.outputs.proceed_valid == 'true' | |
| outputs: | |
| image_tag: ${{ steps.build_container.outputs.image_tag }} | |
| dockerfile_name: ${{ steps.build_container.outputs.dockerfile_name }} | |
| steps: | |
| - name: Display build environment | |
| run: | | |
| echo "Hostname: $(hostname)" | |
| - name: Checkout source code | |
| uses: actions/checkout@v4 | |
| with: | |
| fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed | |
| - name: Configure podman environment | |
| id: setup_env | |
| run: | | |
| echo "Setting up container environment variables..." | |
| # Export environment variables | |
| export XDG_DATA_HOME=/container/${USER}/data | |
| export XDG_RUNTIME_DIR=/container/${USER}/runtime | |
| export TMPDIR=/container/${USER}/tmp/ | |
| # Create required directories | |
| mkdir -p ${XDG_DATA_HOME} | |
| mkdir -p ${XDG_RUNTIME_DIR} | |
| mkdir -p ${TMPDIR} | |
| # Verify directories and output status | |
| echo "Environment setup completed:" | |
| echo "USER: ${USER}" | |
| echo "XDG_DATA_HOME: ${XDG_DATA_HOME}" | |
| echo "XDG_RUNTIME_DIR: ${XDG_RUNTIME_DIR}" | |
| echo "TMPDIR: ${TMPDIR}" | |
| # Check if directories exist | |
| for dir in "${XDG_DATA_HOME}" "${XDG_RUNTIME_DIR}" "${TMPDIR}"; do | |
| if [ -d "$dir" ]; then | |
| echo "✓ Podman Directory exists: $dir" | |
| else | |
| echo "✗ Podman Directory missing: $dir" | |
| exit 1 | |
| fi | |
| done | |
| # Set environment variables for subsequent steps | |
| echo "XDG_DATA_HOME=${XDG_DATA_HOME}" >> $GITHUB_ENV | |
| echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV | |
| echo "TMPDIR=${TMPDIR}" >> $GITHUB_ENV | |
| - name: Initialize build cache | |
| id: setup_cache | |
| run: | | |
| # Setup podman cache directory | |
| CACHE_DIR="/container/${USER}/podman-cache" | |
| mkdir -p "$CACHE_DIR" | |
| echo "[CACHE SETUP]:" | |
| echo " Cache directory: $CACHE_DIR" | |
| # Configure podman to use cache | |
| export TMPDIR="${TMPDIR:-/tmp}" | |
| # Check cache size if exists | |
| if [ -d "$CACHE_DIR" ]; then | |
| cache_size=$(du -sh "$CACHE_DIR" 2>/dev/null | cut -f1 || echo "0") | |
| echo " Current cache size: $cache_size" | |
| else | |
| echo " Cache directory created (empty)" | |
| fi | |
| echo "CACHE_DIR=$CACHE_DIR" >> $GITHUB_ENV | |
| - name: Build container image | |
| id: build_container | |
| run: | | |
| # Get variables from PREPARE-job | |
| dockerfile_path="${{ needs.PREPARE-job.outputs.dockerfile_path }}" | |
| version="${{ needs.PREPARE-job.outputs.version }}" | |
| # Extract filename without extension for tag | |
| dockerfile_name=$(basename "$dockerfile_path" .dockerfile) | |
| echo "Building container with podman..." | |
| echo "Dockerfile: $dockerfile_path" | |
| echo "Tag: ${dockerfile_name}:${version}" | |
| # Build with layer caching enabled | |
| podman build --format=docker \ | |
| --layers \ | |
| -f "$dockerfile_path" \ | |
| -t "${dockerfile_name}:${version}" \ | |
| $(dirname "$dockerfile_path") | |
| # Verify the image was built | |
| if podman images | grep -q "${dockerfile_name}"; then | |
| echo "✓ Image built successfully: ${dockerfile_name}:${version}" | |
| # Show caching information | |
| echo "[LAYER CACHE]: Podman layer caching enabled" | |
| echo "[OPTIMIZATION]: Subsequent builds will reuse unchanged layers" | |
| echo "[NOTE]: APT cache within container is cleaned as per Dockerfile" | |
| # Show all images to see layer reuse | |
| echo "[ALL IMAGES]:" | |
| podman images | head -10 | |
| else | |
| echo "✗ Failed to build image: ${dockerfile_name}:${version}" | |
| exit 1 | |
| fi | |
| # Set outputs for next step | |
| # Output Variables: | |
| # - image_tag: Complete image tag with version (e.g., "ex1:0.0.5") | |
| # - dockerfile_name: Base name without extension (e.g., "ex1") | |
| echo "image_tag=${dockerfile_name}:${version}" >> $GITHUB_OUTPUT | |
| echo "dockerfile_name=${dockerfile_name}" >> $GITHUB_OUTPUT | |
| - name: Export image to archive | |
| id: save_container | |
| run: | | |
| # Get variables from previous step | |
| image_tag="${{ steps.build_container.outputs.image_tag }}" | |
| dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}" | |
| version="${{ needs.PREPARE-job.outputs.version }}" | |
| # Define output file | |
| output_file="${dockerfile_name}_${version}.tar" | |
| echo "Saving container to Docker archive..." | |
| echo "Image: $image_tag" | |
| echo "Output: $output_file" | |
| # Save with podman using Docker archive format (compatible with Trivy) | |
| podman save --format docker-archive "$image_tag" -o "$output_file" | |
| # Verify the file was created | |
| if [ -f "$output_file" ]; then | |
| file_size=$(ls -lh "$output_file" | awk '{print $5}') | |
| echo "✓ Archive saved successfully: $output_file (Size: $file_size)" | |
| else | |
| echo "✗ Failed to save archive: $output_file" | |
| exit 1 | |
| fi | |
| # Set outputs | |
| # Output Variables: | |
| # - archive_file: Archive filename only (e.g., "ex1_0.0.5.tar") | |
| # - archive_path: Full path to archive (e.g., "/home/runner/work/repo/ex1_0.0.5.tar") | |
| echo "archive_file=$output_file" >> $GITHUB_OUTPUT | |
| echo "archive_path=${PWD}/$output_file" >> $GITHUB_OUTPUT | |
| - name: Upload archive to S3 storage | |
| id: s3_upload | |
| uses: ./.github/actions/setup-rclone | |
| with: | |
| access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }} | |
| secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }} | |
| endpoint: https://projects.pawsey.org.au | |
| bucket: ${{ vars.ACACIA_BUCKETNAME }} | |
| destination_path: "" # Not used in upload mode | |
| upload_mode: 'true' | |
| upload_file: ${{ steps.save_container.outputs.archive_file }} | |
| upload_file_type: 'archive' | |
| - name: Load Singularity module and generate SIF file | |
| id: build_sif | |
| run: | | |
| # Get variables | |
| archive_file="${{ steps.save_container.outputs.archive_file }}" | |
| dockerfile_name="${{ steps.build_container.outputs.dockerfile_name }}" | |
| version="${{ needs.PREPARE-job.outputs.version }}" | |
| # Define SIF output file | |
| sif_file="${dockerfile_name}_${version}.sif" | |
| echo "Loading Singularity module and generating SIF file..." | |
| echo "Source: docker-archive://$archive_file" | |
| echo "Output: $sif_file" | |
| # Load Singularity module | |
| echo "Loading Singularity module..." | |
| if module load singularity/4.1.0; then | |
| echo "✓ Singularity module loaded successfully" | |
| else | |
| echo "✗ Failed to load Singularity module" | |
| exit 1 | |
| fi | |
| # Verify Singularity is available | |
| singularity --version | |
| # Build SIF file from Docker archive using Singularity | |
| echo "Building SIF file from Docker archive..." | |
| if singularity build "$sif_file" "docker-archive://$archive_file"; then | |
| if [ -f "$sif_file" ]; then | |
| file_size=$(ls -lh "$sif_file" | awk '{print $5}') | |
| echo "✓ SIF file generated successfully: $sif_file (Size: $file_size)" | |
| else | |
| echo "✗ SIF file generation failed - file not found" | |
| exit 1 | |
| fi | |
| else | |
| echo "✗ Failed to generate SIF file" | |
| exit 1 | |
| fi | |
| # Set outputs | |
| echo "sif_file=$sif_file" >> $GITHUB_OUTPUT | |
| echo "sif_path=${PWD}/$sif_file" >> $GITHUB_OUTPUT | |
| - name: Upload SIF file to S3 storage | |
| id: s3_sif_upload | |
| uses: ./.github/actions/setup-rclone | |
| with: | |
| access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }} | |
| secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }} | |
| endpoint: https://projects.pawsey.org.au | |
| bucket: ${{ vars.ACACIA_SIF_BUCKETNAME }} | |
| destination_path: "" # Not used in upload mode | |
| upload_mode: 'true' | |
| upload_file: ${{ steps.build_sif.outputs.sif_file }} | |
| upload_file_type: 'sif' | |
| SCAN-AND-REPORT-job: | |
| needs: [BUILD-job, PREPARE-job] | |
| runs-on: setonix-podman02 | |
| if: needs.PREPARE-job.outputs.proceed_valid == 'true' && needs.PREPARE-job.outputs.noscan != 'true' | |
| env: | |
| DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }} | |
| VERSION: ${{ needs.PREPARE-job.outputs.version }} | |
| REPORT_DIR: ./trivy-reports | |
| steps: | |
| - name: Display scan environment | |
| run: | | |
| echo "Hostname: $(hostname)" | |
| - name: Download archive from S3 | |
| id: locate | |
| uses: ./.github/actions/setup-rclone | |
| with: | |
| access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }} | |
| secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }} | |
| endpoint: https://projects.pawsey.org.au | |
| bucket: ${{ vars.ACACIA_BUCKETNAME }} | |
| destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar | |
| download_mode: true | |
| dockerfile_name: ${{ env.DOCKERFILE_NAME }} | |
| version: ${{ env.VERSION }} | |
| load_to_podman: false | |
| - name: Initialize scan workspace | |
| run: | | |
| mkdir -p "${REPORT_DIR}" | |
| echo "Report directory created: ${REPORT_DIR}" | |
| # Generate JSON report first | |
| - name: Run vulnerability scan (JSON) | |
| uses: aquasecurity/trivy-action@master | |
| with: | |
| scan-type: image | |
| input: ./${{ steps.locate.outputs.archive_name }} | |
| format: json | |
| output: ${{ env.REPORT_DIR }}/scan-results.json | |
| severity: CRITICAL,HIGH,MEDIUM,LOW,UNKNOWN | |
| ignore-unfixed: true | |
| vuln-type: os,library | |
| exit-code: '0' | |
| hide-progress: true | |
| cache: true | |
| env: | |
| TRIVY_SKIP_DB_UPDATE: false | |
| TRIVY_SKIP_JAVA_DB_UPDATE: false | |
| # Process JSON to create readable summary | |
| - name: Process scan results to summary | |
| run: | | |
| python3 -c " | |
| import json | |
| import sys | |
| import os | |
| json_file = '${REPORT_DIR}/scan-results.json' | |
| output_file = '${REPORT_DIR}/summary.md' | |
| try: | |
| with open(json_file, 'r') as f: | |
| data = json.load(f) | |
| # Initialize counters | |
| counts = {'CRITICAL': 0, 'HIGH': 0, 'MEDIUM': 0, 'LOW': 0, 'UNKNOWN': 0} | |
| vulnerabilities = [] | |
| # Process results | |
| if 'Results' in data and data['Results']: | |
| for result in data['Results']: | |
| if 'Vulnerabilities' in result and result['Vulnerabilities']: | |
| for vuln in result['Vulnerabilities']: | |
| severity = vuln.get('Severity', 'UNKNOWN') | |
| if severity in counts: | |
| counts[severity] += 1 | |
| vulnerabilities.append(vuln) | |
| # Generate summary | |
| with open(output_file, 'w') as f: | |
| artifact_name = data.get('ArtifactName', 'Unknown') | |
| f.write(f'## Trivy Scan Summary — {artifact_name}\n\n') | |
| f.write(f'**Target:** {artifact_name}\n') | |
| f.write(f'**Total Vulnerabilities:** {sum(counts.values())}\n\n') | |
| f.write('### Severity Counts\n') | |
| for severity, count in counts.items(): | |
| if count > 0: | |
| f.write(f'- **{severity}:** {count}\n') | |
| else: | |
| f.write(f'- {severity}: {count}\n') | |
| f.write('\n') | |
| # Show critical and high details | |
| critical_high = [v for v in vulnerabilities if v.get('Severity') in ['CRITICAL', 'HIGH']] | |
| if critical_high: | |
| f.write('### Critical & High Severity Vulnerabilities\n\n') | |
| f.write('| Severity | CVE ID | Package | Installed | Fixed |\n') | |
| f.write('|----------|--------|---------|-----------|-------|\n') | |
| for vuln in critical_high[:15]: # Limit to first 15 | |
| severity = vuln.get('Severity', 'N/A') | |
| cve_id = vuln.get('VulnerabilityID', 'N/A') | |
| pkg = vuln.get('PkgName', 'N/A') | |
| installed = vuln.get('InstalledVersion', 'N/A') | |
| fixed = vuln.get('FixedVersion', 'N/A') or '❌' | |
| f.write(f'| {severity} | {cve_id} | {pkg} | {installed} | {fixed} |\n') | |
| if len(critical_high) > 15: | |
| f.write(f'\n*... and {len(critical_high) - 15} more critical/high vulnerabilities.*\n') | |
| elif sum(counts.values()) > 0: | |
| f.write('### Good News! 🎉\n\n') | |
| f.write('No CRITICAL or HIGH severity vulnerabilities found.\n') | |
| if counts['MEDIUM'] > 0: | |
| f.write(f'Only {counts[\"MEDIUM\"]} MEDIUM severity issues detected.\n') | |
| else: | |
| f.write('### Excellent! ✅\n\n') | |
| f.write('No vulnerabilities found in this image.\n') | |
| print(f'Successfully generated summary: {output_file}') | |
| except Exception as e: | |
| with open(output_file, 'w') as f: | |
| f.write(f'### ❌ Error Processing Scan Results\n\n') | |
| f.write(f'**Error:** {str(e)}\n\n') | |
| f.write(f'**JSON file:** {json_file}\n') | |
| f.write(f'**File exists:** {os.path.exists(json_file)}\n') | |
| print(f'Error: {e}', file=sys.stderr) | |
| " | |
| - name: Debug scan output | |
| run: | | |
| echo "=== Trivy output file exists? ===" | |
| ls -la "${REPORT_DIR}/summary.md" || echo "Summary file not found" | |
| echo "" | |
| echo "=== Trivy output content ===" | |
| cat "${REPORT_DIR}/summary.md" || echo "Cannot read summary file" | |
| echo "" | |
| echo "=== Report directory contents ===" | |
| ls -la "${REPORT_DIR}/" || echo "Report directory not found" | |
| - name: Publish scan report to workflow summary | |
| run: | | |
| { | |
| echo "## Trivy Scan Report for \`${{ env.DOCKERFILE_NAME }}\`" | |
| echo "" | |
| echo "- **Scan target**: \`${{ steps.locate.outputs.archive_name }}\`" | |
| echo "- **Archive source**: \`${{ steps.locate.outputs.archive_path }}\`" | |
| echo "- **Version**: \`${{ env.VERSION }}\`" | |
| echo "" | |
| # Check if summary file exists and has content | |
| if [ -f "${REPORT_DIR}/summary.md" ] && [ -s "${REPORT_DIR}/summary.md" ]; then | |
| echo "### Scan Results" | |
| cat "${REPORT_DIR}/summary.md" | |
| else | |
| echo "### ⚠️ Scan Results" | |
| echo "" | |
| echo "**Status:** Summary file not generated or empty" | |
| echo "" | |
| echo "**Possible causes:**" | |
| echo "- Trivy template processing failed" | |
| echo "- Archive file format issue" | |
| echo "- Trivy action configuration problem" | |
| echo "" | |
| echo "**Debug info:**" | |
| echo "- Template file: \`.github/trivy-summary.tpl\`" | |
| echo "- Expected output: \`${REPORT_DIR}/summary.md\`" | |
| echo "" | |
| echo "Check the debug steps above for more details." | |
| fi | |
| } >> "$GITHUB_STEP_SUMMARY" | |
| # Generate SARIF report for GitHub Security | |
| - name: Generate security report (SARIF) | |
| uses: aquasecurity/trivy-action@master | |
| if: always() | |
| with: | |
| scan-type: image | |
| input: ./${{ steps.locate.outputs.archive_name }} | |
| format: sarif | |
| output: ${{ env.REPORT_DIR }}/trivy-results.sarif | |
| severity: CRITICAL,HIGH,MEDIUM | |
| ignore-unfixed: true | |
| vuln-type: os,library | |
| exit-code: '0' | |
| hide-progress: true | |
| cache: true | |
| env: | |
| TRIVY_SKIP_DB_UPDATE: true | |
| TRIVY_SKIP_JAVA_DB_UPDATE: true | |
| - name: Upload security findings to GitHub | |
| uses: github/codeql-action/upload-sarif@v3 | |
| if: always() | |
| with: | |
| sarif_file: ${{ env.REPORT_DIR }}/trivy-results.sarif | |
| - name: Archive scan reports | |
| uses: actions/upload-artifact@v4 | |
| if: always() | |
| with: | |
| name: trivy-reports-${{ env.DOCKERFILE_NAME }}-${{ env.VERSION }} | |
| path: ${{ env.REPORT_DIR }}/ | |
| retention-days: 30 | |
| PUSH-PRIV-job: | |
| needs: [BUILD-job, PREPARE-job] | |
| runs-on: setonix-podman02 | |
| if: needs.PREPARE-job.outputs.proceed_valid == 'true' | |
| env: | |
| DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }} | |
| VERSION: ${{ needs.PREPARE-job.outputs.version }} | |
| BUCKET: ${{ vars.ACACIA_BUCKETNAME }} | |
| steps: | |
| - name: Display registry environment | |
| run: | | |
| echo "Hostname: $(hostname)" | |
| - name: Download archive from S3 | |
| id: locate_archive | |
| uses: ./.github/actions/setup-rclone | |
| with: | |
| access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }} | |
| secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }} | |
| endpoint: https://projects.pawsey.org.au | |
| bucket: ${{ env.BUCKET }} | |
| destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar | |
| download_mode: true | |
| dockerfile_name: ${{ env.DOCKERFILE_NAME }} | |
| version: ${{ env.VERSION }} | |
| load_to_podman: false | |
| - name: Push to Setonix private registry | |
| if: needs.PREPARE-job.outputs.setonixreg_available == 'true' | |
| continue-on-error: true | |
| run: | | |
| set +e # Don't exit on errors to allow independent execution | |
| echo "==========================================" | |
| echo " Setonix Registry Push Process" | |
| echo "==========================================" | |
| # Load image from archive first | |
| echo "📦 Loading image from archive..." | |
| archive_file="${{ steps.locate_archive.outputs.archive_name }}" | |
| if podman load -i "$archive_file"; then | |
| echo "✓ Image loaded successfully from archive" | |
| else | |
| echo "✗ Failed to load image from archive" | |
| exit 1 | |
| fi | |
| # Login to Setonix Registry | |
| echo "🔐 Logging into Setonix Registry..." | |
| if podman login https://setonix-registry.pawsey.org.au -u "${{ vars.SETONIXREG_USERNAME }}" -p "${{ secrets.SETONIXREG_PASS }}"; then | |
| echo "✓ Setonix Registry login successful" | |
| else | |
| echo "✗ Setonix Registry login failed" | |
| exit 1 | |
| fi | |
| # Tag and push | |
| image_tag="${DOCKERFILE_NAME}:${VERSION}" | |
| setonix_tag="setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}" | |
| echo "" | |
| echo "🏷️ Tagging image for Setonix Registry:" | |
| echo " Source: $image_tag" | |
| echo " Target: $setonix_tag" | |
| if podman tag "$image_tag" "$setonix_tag"; then | |
| echo "✓ Image tagged successfully" | |
| else | |
| echo "✗ Image tagging failed" | |
| exit 1 | |
| fi | |
| echo "" | |
| echo "📤 Pushing to Setonix Registry..." | |
| if podman push "$setonix_tag"; then | |
| echo "✅ Successfully pushed to Setonix Registry: $setonix_tag" | |
| else | |
| echo "❌ Failed to push to Setonix Registry" | |
| exit 1 | |
| fi | |
| # Cleanup local tagged image | |
| echo "" | |
| echo "🧹 Cleaning up local Setonix tagged image..." | |
| podman rmi "$setonix_tag" 2>/dev/null || echo " (Setonix tag already removed or not found)" | |
| podman rmi "$image_tag" 2>/dev/null || echo " (Base image already removed or not found)" | |
| PUSH-PUBLIC-job: | |
| needs: [BUILD-job, PREPARE-job, PUSH-PRIV-job] | |
| runs-on: setonix-podman02 | |
| if: needs.PREPARE-job.outputs.proceed_valid == 'true' | |
| environment: | |
| name: manual_approval | |
| env: | |
| DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }} | |
| VERSION: ${{ needs.PREPARE-job.outputs.version }} | |
| steps: | |
| - name: Display public registry push plan | |
| run: | | |
| echo "Hostname: $(hostname)" | |
| echo "" | |
| echo "=== Public Registry Push Plan ===" | |
| echo "Registry credentials were checked in PREPARE-job:" | |
| echo "" | |
| # Show Docker Hub push plan | |
| if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then | |
| echo "[✓] Docker Hub: Will push to ${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}" | |
| else | |
| echo "[skip] Docker Hub: Skipping (credentials not available)" | |
| fi | |
| # Show Quay.io push plan | |
| if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then | |
| echo "[✓] Quay.io: Will push to quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}" | |
| else | |
| echo "[skip] Quay.io: Skipping (credentials not available)" | |
| fi | |
| # Show Setonix Registry status | |
| if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then | |
| echo "[✓] Setonix Private Registry: Already pushed in PUSH-PRIV-job" | |
| echo "[✓] Setonix Public Registry: Will push to setonix-registry.pawsey.org.au/pawsey/${DOCKERFILE_NAME}:${VERSION}" | |
| else | |
| echo "[skip] Setonix Registries: Skipped (credentials not available)" | |
| fi | |
| # Check if any public registry is available | |
| if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" != "true" ] && [ "${{ needs.PREPARE-job.outputs.quayio_available }}" != "true" ] && [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" != "true" ]; then | |
| echo "" | |
| echo "[INFO] No public registry pushes will be performed in this job." | |
| echo "Note: All registry credentials are not available." | |
| fi | |
| echo "" | |
# Pull the image archive from S3 (Acacia) via the repo-local setup-rclone
# composite action and load it into podman. Later push steps read this
# step's `image_tag` output (presumably set by the action — TODO confirm
# against .github/actions/setup-rclone).
- name: Download archive and load to podman
  id: locate_and_load
  # Skip entirely when no registry credentials are available anywhere.
  if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true' || needs.PREPARE-job.outputs.setonixreg_available == 'true'
  uses: ./.github/actions/setup-rclone
  with:
    access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }}
    secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }}
    endpoint: https://projects.pawsey.org.au
    bucket: ${{ vars.ACACIA_BUCKETNAME }}
    # Archive object name: <dockerfile-name>_<semver>.tar
    destination_path: ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar
    download_mode: true
    dockerfile_name: ${{ env.DOCKERFILE_NAME }}
    version: ${{ env.VERSION }}
    load_to_podman: true
# Friendly no-op notice when every credential check failed; keeps the job
# green so credential-less runs still complete successfully.
- name: Display skip message for missing credentials
  if: needs.PREPARE-job.outputs.dockerhub_available != 'true' && needs.PREPARE-job.outputs.quayio_available != 'true' && needs.PREPARE-job.outputs.setonixreg_available != 'true'
  run: |
    echo ""
    echo "=========================================="
    echo " Public Registry Push: SKIPPED"
    echo "=========================================="
    echo ""
    echo "No public registry credentials were found."
    echo "The workflow will complete successfully without pushing to public registries."
    echo ""
    echo "This is normal behavior when:"
    echo "• You only want to use private storage (S3)"
    echo "• Credentials are not yet configured"
    echo "• Testing the build pipeline"
    echo ""
    echo "To enable public registry pushes in the future, configure:"
    echo "• Docker Hub: Set DOCKERHUB_USERNAME (repo variable) and DOCKERHUB_TOKEN (repo secret)"
    echo "• Quay.io: Set QUAYIO_USERNAME (repo variable) and QUAYIO_TOKEN (repo secret)"
    echo ""
    echo "[✓] Job completed successfully (no action required)"
    echo ""
# Ensure ~/.docker exists before the podman login steps below write
# registry auth configuration there.
- name: Initialize Docker configuration
  if: needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true' || needs.PREPARE-job.outputs.setonixreg_available == 'true'
  run: |
    mkdir -p ~/.docker
    echo "Created Docker config directory: ~/.docker"
# ============================================
# Docker Hub Push (Independent Operation)
# ============================================
# Tags the locally loaded image for Docker Hub and pushes it.
# continue-on-error keeps a Docker Hub failure from blocking the
# Quay.io / Setonix pushes that follow.
- name: Push to Docker Hub public registry
  if: needs.PREPARE-job.outputs.dockerhub_available == 'true'
  continue-on-error: true
  run: |
    set +e # Don't exit on errors to allow independent execution
    echo "=========================================="
    echo " Docker Hub Push Process"
    echo "=========================================="
    # Login to Docker Hub.
    # Fix: feed the token via --password-stdin instead of the -p flag so
    # the secret never appears in the process table or shell trace output.
    echo "🔐 Logging into Docker Hub..."
    if printf '%s' "${{ secrets.DOCKERHUB_TOKEN }}" | podman login docker.io -u "${{ vars.DOCKERHUB_USERNAME }}" --password-stdin; then
      echo "✓ Docker Hub login successful"
    else
      echo "✗ Docker Hub login failed"
      exit 1
    fi
    # Tag and push
    image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    dockerhub_tag="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
    echo ""
    echo "🏷️ Tagging image for Docker Hub:"
    echo " Source: $image_tag"
    echo " Target: $dockerhub_tag"
    if podman tag "$image_tag" "$dockerhub_tag"; then
      echo "✓ Image tagged successfully"
    else
      echo "✗ Image tagging failed"
      exit 1
    fi
    echo ""
    echo "📤 Pushing to Docker Hub..."
    if podman push "$dockerhub_tag"; then
      echo "✅ Successfully pushed to Docker Hub: $dockerhub_tag"
    else
      echo "❌ Failed to push to Docker Hub"
      exit 1
    fi
# ============================================
# Quay.io Push (Independent Operation)
# ============================================
# Tags the locally loaded image for Quay.io and pushes it; independent of
# the Docker Hub / Setonix steps thanks to continue-on-error.
- name: Push to Quay.io public registry
  if: needs.PREPARE-job.outputs.quayio_available == 'true'
  continue-on-error: true
  run: |
    set +e # Don't exit on errors to allow independent execution
    echo "=========================================="
    echo " Quay.io Push Process"
    echo "=========================================="
    # Login to Quay.io.
    # Fix: feed the token via --password-stdin instead of the -p flag so
    # the secret never appears in the process table or shell trace output.
    echo "🔐 Logging into Quay.io..."
    if printf '%s' "${{ secrets.QUAYIO_TOKEN }}" | podman login quay.io -u "${{ vars.QUAYIO_USERNAME }}" --password-stdin; then
      echo "✓ Quay.io login successful"
    else
      echo "✗ Quay.io login failed"
      exit 1
    fi
    # Tag and push
    image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    quayio_tag="quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
    echo ""
    echo "🏷️ Tagging image for Quay.io:"
    echo " Source: $image_tag"
    echo " Target: $quayio_tag"
    if podman tag "$image_tag" "$quayio_tag"; then
      echo "✓ Image tagged successfully"
    else
      echo "✗ Image tagging failed"
      exit 1
    fi
    echo ""
    echo "📤 Pushing to Quay.io..."
    if podman push "$quayio_tag"; then
      echo "✅ Successfully pushed to Quay.io: $quayio_tag"
    else
      echo "❌ Failed to push to Quay.io"
      exit 1
    fi
# ============================================
# Setonix Public Registry Push (Independent Operation)
# ============================================
# Re-tags the image into the public "pawsey" channel of the Setonix
# registry and pushes it (the private channel was handled in PUSH-PRIV-job).
- name: Push to Setonix public registry
  if: needs.PREPARE-job.outputs.setonixreg_available == 'true'
  continue-on-error: true
  run: |
    set +e # Don't exit on errors to allow independent execution
    echo "=========================================="
    echo " Setonix Public Registry Push Process"
    echo "=========================================="
    # Login to Setonix Registry.
    # Fix: feed the password via --password-stdin instead of the -p flag so
    # the secret never appears in the process table or shell trace output.
    echo "🔐 Logging into Setonix Registry..."
    if printf '%s' "${{ secrets.SETONIXREG_PASS }}" | podman login https://setonix-registry.pawsey.org.au -u "${{ vars.SETONIXREG_USERNAME }}" --password-stdin; then
      echo "✓ Setonix Registry login successful"
    else
      echo "✗ Setonix Registry login failed"
      exit 1
    fi
    # Tag and push to public channel
    image_tag="${{ steps.locate_and_load.outputs.image_tag }}"
    setonix_public_tag="setonix-registry.pawsey.org.au/pawsey/${DOCKERFILE_NAME}:${VERSION}"
    echo ""
    echo "🏷️ Tagging image for Setonix Public Registry:"
    echo " Source: $image_tag"
    echo " Target: $setonix_public_tag"
    if podman tag "$image_tag" "$setonix_public_tag"; then
      echo "✓ Image tagged successfully"
    else
      echo "✗ Image tagging failed"
      exit 1
    fi
    echo ""
    echo "📤 Pushing to Setonix Public Registry..."
    if podman push "$setonix_public_tag"; then
      echo "✅ Successfully pushed to Setonix Public Registry: $setonix_public_tag"
    else
      echo "❌ Failed to push to Setonix Public Registry"
      exit 1
    fi
# Remove every local tag created by this job so the runner's podman
# storage does not accumulate images across workflow runs.
- name: Clean up podman images
  if: always() && (needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true' || needs.PREPARE-job.outputs.setonixreg_available == 'true')
  run: |
    # drop_tag <image-ref> <removal-label> <fallback-label>
    # Removes one local tag, tolerating tags that were never created.
    drop_tag() {
      echo "Removing $2: $1"
      podman rmi "$1" 2>/dev/null || echo " ($3 already removed or not found)"
    }
    echo "🧹 Cleaning up local images..."
    # Original image loaded from the S3 archive.
    drop_tag "${{ steps.locate_and_load.outputs.image_tag }}" "base image" "Base image"
    # Registry-specific tags exist only where the matching push step ran.
    if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
      drop_tag "${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}" "Docker Hub tag" "Docker Hub tag"
    fi
    if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
      drop_tag "quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}" "Quay.io tag" "Quay.io tag"
    fi
    if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then
      drop_tag "setonix-registry.pawsey.org.au/pawsey/${DOCKERFILE_NAME}:${VERSION}" "Setonix Public tag" "Setonix Public tag"
    fi
    echo "✅ Local image cleanup completed"
# Registers the freshly pushed public image with SHPC (Singularity HPC) on
# Setonix so users can install it as a module from the pawsey_registry.
DEPLOY-job:
  needs: [BUILD-job, PREPARE-job, PUSH-PUBLIC-job]
  runs-on: setonix-podman02
  # Runs only when a public registry (Docker Hub or Quay.io) received the
  # image; a Setonix-registry-only push does not trigger SHPC deployment.
  if: needs.PREPARE-job.outputs.proceed_valid == 'true' && (needs.PREPARE-job.outputs.dockerhub_available == 'true' || needs.PREPARE-job.outputs.quayio_available == 'true')
  env:
    DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
    VERSION: ${{ needs.PREPARE-job.outputs.version }}
  steps:
    # Informational: show where we are running and which registries the
    # deployment can draw from.
    - name: Display deployment environment
      run: |
        echo "Hostname: $(hostname)"
        echo "Starting SHPC deployment for ${DOCKERFILE_NAME}:${VERSION}"
        echo ""
        echo "Available public registries:"
        if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
          echo "• Docker Hub: ${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
        fi
        if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
          echo "• Quay.io: quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
        fi
    # Load the SHPC environment module and export the registry path used by
    # the later steps. Assumes the `module` command is available on this
    # self-hosted runner's shell — TODO confirm.
    - name: Load SHPC module and setup environment
      id: shpc_setup
      run: |
        set -euo pipefail
        echo "Loading SHPC module..."
        if module load shpc/0.1.32; then
          echo "✓ SHPC module loaded successfully"
          shpc --version
        else
          echo "✗ Failed to load SHPC module"
          exit 1
        fi
        # Set SHPC registry path (shared across subsequent steps via env)
        SHPC_REGISTRY="/software/setonix/2025.08/pawsey/software/shpc/pawsey_registry"
        echo "SHPC_REGISTRY=$SHPC_REGISTRY" >> $GITHUB_ENV
        echo "SHPC registry path: $SHPC_REGISTRY"
    # Pick the registry to deploy from (Quay.io preferred over Docker Hub)
    # and look up the image digest via the registry HTTP API.
    - name: Determine deployment target and get SHA256 digest
      id: get_image_info
      run: |
        set -euo pipefail
        # Prioritize Quay.io if available, otherwise use Docker Hub
        if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
          full_image="quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
          registry_url="https://quay.io/v2"
          api_path="${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}"
          registry_type="quay.io"
        elif [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
          full_image="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}:${VERSION}"
          registry_url="https://registry-1.docker.io/v2"
          api_path="${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}"
          registry_type="docker.io"
        else
          echo "Error: No public registry available for deployment"
          exit 1
        fi
        echo "Deploying image: $full_image"
        echo "Registry API: $registry_url"
        echo "API path: $api_path"
        # Get manifest and extract digest
        # NOTE(review): this request is unauthenticated; registry-1.docker.io
        # requires a bearer token even for public repos, so the call likely
        # fails and falls through to the placeholder digest below — confirm.
        # NOTE(review): .config.digest is the image *config* blob digest, not
        # the manifest digest; verify which one SHPC expects in container.yaml.
        echo "Fetching manifest for tag: ${VERSION}"
        digest=$(curl -s -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \
          "$registry_url/$api_path/manifests/${VERSION}" \
          | jq -r '.config.digest // empty')
        if [ -z "$digest" ] || [ "$digest" = "null" ]; then
          echo "Warning: Could not get digest, using placeholder"
          digest="sha256:placeholder_digest_for_${VERSION}"
        fi
        echo "SHA256 digest: $digest"
        # Set outputs
        echo "full_image=$full_image" >> $GITHUB_OUTPUT
        echo "registry_type=$registry_type" >> $GITHUB_OUTPUT
        echo "digest=$digest" >> $GITHUB_OUTPUT
    # Write a container.yaml entry into the SHPC registry tree. The write
    # happens as the `spack` user (registry tree is owned by spack).
    - name: Create SHPC container registry entry
      run: |
        set -euo pipefail
        full_image="${{ steps.get_image_info.outputs.full_image }}"
        registry_type="${{ steps.get_image_info.outputs.registry_type }}"
        digest="${{ steps.get_image_info.outputs.digest }}"
        # Create registry directory structure based on the deployed image
        if [ "$registry_type" = "quay.io" ]; then
          shpc_path="${SHPC_REGISTRY}/quay.io/${{ vars.QUAYIO_USERNAME }}/${DOCKERFILE_NAME}"
        else
          shpc_path="${SHPC_REGISTRY}/docker.io/${{ vars.DOCKERHUB_USERNAME }}/${DOCKERFILE_NAME}"
        fi
        echo "Creating SHPC registry entry at: $shpc_path"
        # Create container.yaml content
        container_yaml=$(cat <<EOF
        docker: $full_image
        latest:
          "${VERSION}": "$digest"
        tags:
          "${VERSION}": "$digest"
        maintainer: "@github-actions"
        description: "Container for ${DOCKERFILE_NAME} version ${VERSION} - built and deployed via CI/CD"
        url: "https://github.com/${GITHUB_REPOSITORY}"
        EOF
        )
        echo "Container YAML content:"
        echo "$container_yaml"
        # Verify SHPC configuration before writing (non-fatal check)
        echo "Verifying SHPC configuration..."
        module load shpc/0.1.32
        if shpc config get registry | grep -q pawsey_registry; then
          echo "✓ SHPC registry is properly configured"
        else
          echo "Warning: SHPC registry may not be properly configured"
        fi
        # Write container YAML to temporary file
        temp_file="/tmp/container_${RANDOM}.yaml"
        echo "$container_yaml" > "$temp_file"
        echo "Container YAML written to temporary file: $temp_file"
        # Switch to spack user to create registry entry. The heredoc is
        # unquoted, so $shpc_path / $temp_file expand in *this* shell; the
        # single quotes only guard against whitespace when spack's shell
        # re-parses the expanded values.
        echo "Switching to spack user to write to registry..."
        sudo su - spack << EOF
        set -euo pipefail
        echo "Now running as spack user"
        mkdir -p '$shpc_path'
        cp '$temp_file' '$shpc_path/container.yaml'
        echo '✓ SHPC registry entry created successfully'
        ls -la '$shpc_path/'
        cat '$shpc_path/container.yaml'
        EOF
        # Clean up temporary file
        rm -f "$temp_file"
        echo "Temporary file cleaned up"
        echo "✅ SHPC deployment completed"
        echo "Registry path: $shpc_path"
        echo "Image: $full_image"
        echo "Version: ${VERSION}"
        echo "Digest: $digest"
# Aggregates the results of every pipeline stage into the Actions step
# summary. Runs under always() so it reports even when upstream jobs fail.
SUMMARY-job:
  # Fix: SCAN-AND-REPORT-job added to `needs` — its result is read in the
  # Job Results table below, and a job absent from `needs` has no entry in
  # the `needs` context, so its row would always render as failed.
  needs: [BUILD-job, PREPARE-job, SCAN-AND-REPORT-job, PUSH-PRIV-job, PUSH-PUBLIC-job, DEPLOY-job]
  runs-on: ubuntu-latest
  if: always() && needs.PREPARE-job.outputs.proceed_valid == 'true'
  env:
    DOCKERFILE_NAME: ${{ needs.BUILD-job.outputs.dockerfile_name }}
    VERSION: ${{ needs.PREPARE-job.outputs.version }}
steps:
  # Builds one Markdown document (archives, registry pushes, pull commands,
  # SHPC deployment, per-job results) and appends it to the step summary.
  # The whole block is grouped in { } so a single redirect writes it all.
  - name: Generate deployment summary
    run: |
      {
        echo "# 🚀 Container Build and Deployment Summary"
        echo ""
        echo "**Image:** \`${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\`"
        echo "**Build Status:** ✅ Completed"
        # NOTE(review): date prints runner-local time but labels it UTC;
        # consider `date -u` — confirm intent.
        echo "**Timestamp:** $(date '+%Y-%m-%d %H:%M:%S UTC')"
        echo ""
        echo "## 📦 Archive Storage"
        echo ""
        echo "| Location | Address | Status |"
        echo "|----------|---------|--------|"
        # S3 Docker Archive Storage
        if [ "${{ needs.BUILD-job.result }}" = "success" ]; then
          echo "| S3 Docker Archive | \`s3://${{ vars.ACACIA_BUCKETNAME }}/${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar\` | ✅ Uploaded |"
        else
          echo "| S3 Docker Archive | \`s3://${{ vars.ACACIA_BUCKETNAME }}/${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.tar\` | ❌ Failed |"
        fi
        # S3 Singularity SIF Storage
        if [ "${{ needs.BUILD-job.result }}" = "success" ]; then
          echo "| S3 Singularity SIF | \`s3://${{ vars.ACACIA_SIF_BUCKETNAME }}/${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.sif\` | ✅ Uploaded |"
        else
          echo "| S3 Singularity SIF | \`s3://${{ vars.ACACIA_SIF_BUCKETNAME }}/${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.sif\` | ❌ Failed |"
        fi
        echo ""
        echo "## 🏗️ Container Registry Deployments"
        echo ""
        echo "| Registry | Address | Status |"
        echo "|----------|---------|--------|"
        # Setonix Private Registry (pushed by PUSH-PRIV-job)
        if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ] && [ "${{ needs.PUSH-PRIV-job.result }}" = "success" ]; then
          echo "| Setonix Private | \`setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Pushed |"
        elif [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then
          echo "| Setonix Private | \`setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ❌ Failed |"
        else
          echo "| Setonix Private | \`setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ⏭️ Skipped (No credentials) |"
        fi
        # Setonix Public Registry (pushed by PUSH-PUBLIC-job)
        if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "| Setonix Public | \`setonix-registry.pawsey.org.au/pawsey/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Pushed |"
        elif [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ]; then
          echo "| Setonix Public | \`setonix-registry.pawsey.org.au/pawsey/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ❌ Failed |"
        else
          echo "| Setonix Public | \`setonix-registry.pawsey.org.au/pawsey/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ⏭️ Skipped (No credentials) |"
        fi
        # Docker Hub
        if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "| Docker Hub | \`docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Pushed |"
        elif [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
          echo "| Docker Hub | \`docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ❌ Failed |"
        else
          echo "| Docker Hub | \`docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ⏭️ Skipped (No credentials) |"
        fi
        # Quay.io
        if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "| Quay.io | \`quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Pushed |"
        elif [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
          echo "| Quay.io | \`quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ❌ Failed |"
        else
          echo "| Quay.io | \`quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ⏭️ Skipped (No credentials) |"
        fi
        echo ""
        echo "## 🔧 Container Usage Commands"
        echo ""
        echo "### Singularity Usage"
        echo ""
        echo "Download and use the SIF file directly:"
        echo "\`\`\`bash"
        echo "# Download SIF file from S3 (requires rclone configuration)"
        echo "rclone copy pawsey0012:${{ vars.ACACIA_SIF_BUCKETNAME }}/${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.sif ./"
        echo ""
        echo "# Run with Singularity"
        echo "singularity exec ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.sif <command>"
        echo "# or"
        echo "singularity run ${{ env.DOCKERFILE_NAME }}_${{ env.VERSION }}.sif"
        echo "\`\`\`"
        echo ""
        echo "### Container Engine Pull Commands"
        echo ""
        echo "Use the following commands to pull the container image:"
        echo ""
        # Generate pull commands for successful pushes only
        if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ] && [ "${{ needs.PUSH-PRIV-job.result }}" = "success" ]; then
          echo "### Setonix Private Registry"
          echo "\`\`\`bash"
          echo "podman pull setonix-registry.pawsey.org.au/${{ vars.SETONIXREG_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
          echo "\`\`\`"
          echo ""
        fi
        if [ "${{ needs.PREPARE-job.outputs.setonixreg_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "### Setonix Public Registry"
          echo "\`\`\`bash"
          echo "podman pull setonix-registry.pawsey.org.au/pawsey/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
          echo "\`\`\`"
          echo ""
        fi
        if [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "### Docker Hub"
          echo "\`\`\`bash"
          echo "docker pull ${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
          echo "# or"
          echo "podman pull docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
          echo "\`\`\`"
          echo ""
        fi
        if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ] && [ "${{ needs.PUSH-PUBLIC-job.result }}" = "success" ]; then
          echo "### Quay.io"
          echo "\`\`\`bash"
          echo "podman pull quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
          echo "\`\`\`"
          echo ""
        fi
        echo "## 🚀 SHPC Container Deployment"
        echo ""
        echo "| Registry Path | Image Source | Status |"
        echo "|---------------|--------------|--------|"
        # SHPC deployment status based on DEPLOY-job result and available
        # registries; mirrors DEPLOY-job's Quay-over-Docker-Hub preference.
        if [ "${{ needs.DEPLOY-job.result }}" = "success" ]; then
          # Determine which registry was used for SHPC deployment
          if [ "${{ needs.PREPARE-job.outputs.quayio_available }}" = "true" ]; then
            echo "| \`/software/setonix/2025.08/pawsey/software/shpc/pawsey_registry/quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}/\` | \`quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Deployed |"
            echo ""
            echo "### SHPC Usage Commands"
            echo ""
            echo "Load and use the container via SHPC:"
            echo "\`\`\`bash"
            echo "# Load SHPC module"
            echo "module load shpc/0.1.32"
            echo ""
            echo "# Install the container"
            echo "shpc install quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
            echo ""
            echo "# Show available commands"
            echo "shpc show quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
            echo ""
            echo "# Use the container"
            echo "shpc run quay.io/${{ vars.QUAYIO_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }} <command>"
            echo "\`\`\`"
          elif [ "${{ needs.PREPARE-job.outputs.dockerhub_available }}" = "true" ]; then
            echo "| \`/software/setonix/2025.08/pawsey/software/shpc/pawsey_registry/docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}/\` | \`docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}\` | ✅ Deployed |"
            echo ""
            echo "### SHPC Usage Commands"
            echo ""
            echo "Load and use the container via SHPC:"
            echo "\`\`\`bash"
            echo "# Load SHPC module"
            echo "module load shpc/0.1.32"
            echo ""
            echo "# Install the container"
            echo "shpc install docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
            echo ""
            echo "# Show available commands"
            echo "shpc show docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }}"
            echo ""
            echo "# Use the container"
            echo "shpc run docker.io/${{ vars.DOCKERHUB_USERNAME }}/${{ env.DOCKERFILE_NAME }}:${{ env.VERSION }} <command>"
            echo "\`\`\`"
          fi
        elif [ "${{ needs.DEPLOY-job.result }}" = "failure" ]; then
          echo "| SHPC Registry | N/A | ❌ Deployment Failed |"
        elif [ "${{ needs.DEPLOY-job.result }}" = "skipped" ]; then
          echo "| SHPC Registry | N/A | ⏭️ Skipped (No public registry available) |"
        else
          echo "| SHPC Registry | N/A | ⏸️ Not Executed |"
        fi
        echo ""
        echo "## 📊 Job Results"
        echo ""
        echo "| Job | Status |"
        echo "|-----|--------|"
        echo "| PREPARE | ${{ needs.PREPARE-job.result == 'success' && '✅ Success' || '❌ Failed' }} |"
        echo "| BUILD | ${{ needs.BUILD-job.result == 'success' && '✅ Success' || '❌ Failed' }} |"
        # NOTE(review): this expression requires SCAN-AND-REPORT-job to be
        # listed in this job's `needs`; otherwise the result is empty and
        # the row always renders as Failed.
        echo "| SCAN-AND-REPORT | ${{ needs['SCAN-AND-REPORT-job'].result == 'success' && '✅ Success' || (needs['SCAN-AND-REPORT-job'].result == 'skipped' && '⏭️ Skipped') || '❌ Failed' }} |"
        echo "| PUSH-PRIV | ${{ needs.PUSH-PRIV-job.result == 'success' && '✅ Success' || '❌ Failed' }} |"
        echo "| PUSH-PUBLIC | ${{ needs.PUSH-PUBLIC-job.result == 'success' && '✅ Success' || '❌ Failed' }} |"
        echo "| DEPLOY | ${{ needs.DEPLOY-job.result == 'success' && '✅ Success' || (needs.DEPLOY-job.result == 'skipped' && '⏭️ Skipped') || '❌ Failed' }} |"
      } >> "$GITHUB_STEP_SUMMARY"
| # CLEANUP-job: | |
| # needs: [APPROVE-PUSH-PUB-job,PUSH-PRIV-job, SCAN-AND-REPORT-job, BUILD-job, PREPARE-job] | |
| # if: always() | |
| # runs-on: ${{ needs.PREPARE-job.outputs.runner_label }} | |
| # steps: | |
| # - name: Clean-up | |
| # run: | | |
| # sudo rm -rf $HOME/runner/artifacts/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }} | |
| # DEPLOY-job: | |
| # needs: [PUSH-PRIV-job,PREPARE-job] | |
| # runs-on: Ella | |
| # if: needs.PREPARE-job.outputs.platform_tag == 'arm' | |
| # env: | |
| # BUCKET: ${{ vars.ACACIA_BUCKETNAME }} # BYO or pawsey0001-image-compilation if compile for project | |
| # DESTINATION_PATH: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}/${{ needs.PREPARE-job.outputs.date }} | |
| # #environment: | |
| # # name: manual_approval | |
| # steps: | |
| # - name: Checkout repository | |
| # uses: actions/checkout@v4 | |
| # with: | |
| # fetch-depth: 1 # As the runs-on machine maybe different from Build, re-checkout source code. Only the current commit is needed | |
| # - name: Setup rclone | |
| # uses: ./.github/actions/setup-rclone | |
| # with: | |
| # access_key_id: ${{ secrets.ACACIA_ACCESS_KEY_ID }} | |
| # secret_access_key: ${{ secrets.ACACIA_SECRET_ACCESS_KEY }} | |
| # endpoint: https://projects.pawsey.org.au | |
| # bucket: ${{ env.BUCKET }} | |
| # destination_path: ${{ env.DESTINATION_PATH }} | |
| # - name: Deploy ARM image to Ella | |
| # run: | | |
| # echo "Deploying ARM image to Ella" | |
| # echo "Hostname: $(hostname)" | |
| # echo "Deploying image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }}" to Ella | |
| # mkdir -p $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/ | |
| # rclone copy pawsey0001:"${{ env.BUCKET }}/${{ env.DESTINATION_PATH }}/image.tar" $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/ | |
| # - name: Convert to Singularity File | |
| # run: | | |
| # echo "Converting to Singularity File" | |
| # echo "Converting image: ${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}:${{ needs.PREPARE-job.outputs.date }}" to Singularity | |
| # source ~/.bashrc | |
| # singularity build --force $MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/${{ needs.PREPARE-job.outputs.dockerfile_name }}-${{ needs.PREPARE-job.outputs.platform_tag }}.sif docker-archive://$MYSCRATCH/image/${{ needs.PREPARE-job.outputs.dockerfile_name }}/image.tar | |