Commit 4870c7b
Fix review findings and restrict ODF to disconnected mode
- Pass shell variables to inline Python via os.environ instead of string interpolation to prevent injection risks
- Redact credentials from setup_ceph.sh stdout output
- Use if/else for prometheus module enable instead of unconditional success after || true
- Quote quayBackendRGWConfiguration values in odf.sh for YAML safety
- Add language identifiers to fenced code blocks in ODF_CEPH_CI.md
- Fix generate_enclave_vars.sh summary to reflect ODF/RadosGWStorage when storage plugin is odf
- Download cephadm from official download.ceph.com with GitHub fallback
- Remove connected ODF job (ODF only runs in disconnected mode)
- When ODF is selected, skip LVMS disconnected to avoid parallel runs
- Reduce master VM extra disk to 60G for ODF (storage is on LZ)
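A minimal sketch of the injection issue behind the first bullet (variable name hypothetical, not from this repo): interpolating a shell value into Python source lets a quote inside the value become executable code, while passing it through the environment keeps it inert data.

```bash
SECRET="x'); import os; os.system('id'); ('"   # hostile value, for illustration

# Unsafe: the value is spliced into the Python source and parsed as code.
python3 -c "print('${SECRET}')"

# Safe: the value crosses into Python via the environment and is never parsed.
SECRET="$SECRET" python3 -c "import os; print(os.environ['SECRET'])"
```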
1 parent: 409082d

10 files changed: 108 additions & 72 deletions


.github/workflows/e2e-deployment.yml

Lines changed: 15 additions & 4 deletions

```diff
@@ -60,6 +60,7 @@ jobs:
     outputs:
       should_run: ${{ steps.decision.outputs.should_run }}
       storage_plugins: ${{ steps.decision.outputs.storage_plugins }}
+      storage_plugins_connected: ${{ steps.decision.outputs.storage_plugins_connected }}
 
     steps:
       - name: Checkout code
@@ -101,31 +102,41 @@ jobs:
         run: |
           # Determine whether to run and which storage plugins to test.
           # storage_plugins is a JSON array consumed by the job matrix.
+          # ODF only runs in disconnected mode. Connected always uses lvms.
           if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
             echo "should_run=true" >> $GITHUB_OUTPUT
             PLUGIN="${{ inputs.storage-plugin || 'lvms' }}"
             echo "storage_plugins=[\"${PLUGIN}\"]" >> $GITHUB_OUTPUT
+            if [[ "$PLUGIN" == "odf" ]]; then
+              echo "storage_plugins_connected=[\"lvms\"]" >> $GITHUB_OUTPUT
+            else
+              echo "storage_plugins_connected=[\"${PLUGIN}\"]" >> $GITHUB_OUTPUT
+            fi
             echo "Manual trigger - E2E will run (${PLUGIN})" | tee -a $GITHUB_STEP_SUMMARY
           elif [[ "${{ github.event_name }}" == "schedule" ]]; then
             echo "should_run=true" >> $GITHUB_OUTPUT
             echo "storage_plugins=[\"lvms\"]" >> $GITHUB_OUTPUT
+            echo "storage_plugins_connected=[\"lvms\"]" >> $GITHUB_OUTPUT
             echo "Scheduled trigger - E2E will run" | tee -a $GITHUB_STEP_SUMMARY
           elif [[ "${{ github.event_name }}" == "merge_group" ]]; then
             echo "should_run=true" >> $GITHUB_OUTPUT
             echo "storage_plugins=[\"lvms\"]" >> $GITHUB_OUTPUT
+            echo "storage_plugins_connected=[\"lvms\"]" >> $GITHUB_OUTPUT
             echo "Merge queue - E2E will run" | tee -a $GITHUB_STEP_SUMMARY
           elif [[ "${{ steps.filter.outputs.e2e }}" == "true" ]]; then
             echo "should_run=true" >> $GITHUB_OUTPUT
+            echo "storage_plugins_connected=[\"lvms\"]" >> $GITHUB_OUTPUT
             if [[ "${{ steps.filter.outputs.odf }}" == "true" ]]; then
-              echo "storage_plugins=[\"lvms\",\"odf\"]" >> $GITHUB_OUTPUT
-              echo "E2E-relevant files changed (including ODF) - both LVMS and ODF will run" | tee -a $GITHUB_STEP_SUMMARY
+              echo "storage_plugins=[\"odf\"]" >> $GITHUB_OUTPUT
+              echo "E2E-relevant files changed (including ODF) - ODF disconnected will run" | tee -a $GITHUB_STEP_SUMMARY
             else
               echo "storage_plugins=[\"lvms\"]" >> $GITHUB_OUTPUT
               echo "E2E-relevant files changed - E2E will run" | tee -a $GITHUB_STEP_SUMMARY
             fi
           else
             echo "should_run=false" >> $GITHUB_OUTPUT
             echo "storage_plugins=[\"lvms\"]" >> $GITHUB_OUTPUT
+            echo "storage_plugins_connected=[\"lvms\"]" >> $GITHUB_OUTPUT
             echo "Only documentation/config files changed - E2E will be skipped" | tee -a $GITHUB_STEP_SUMMARY
             echo "To run E2E anyway, use manual workflow_dispatch" | tee -a $GITHUB_STEP_SUMMARY
           fi
@@ -145,7 +156,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        storage-plugin: ${{ fromJSON(needs.check-e2e-needed.outputs.storage_plugins) }}
+        storage-plugin: ${{ fromJSON(needs.check-e2e-needed.outputs.storage_plugins_connected) }}
 
     concurrency:
       group: enclave-ci-connected-${{ matrix.storage-plugin }}-${{ github.run_id }}
@@ -552,7 +563,7 @@ jobs:
     if: >-
       needs.check-e2e-needed.outputs.should_run == 'true' &&
       (github.event_name != 'workflow_dispatch' || inputs.run-disconnected == true)
-    runs-on: [self-hosted, enclave-large]
+    runs-on: ${{ matrix.storage-plugin == 'odf' && fromJSON('["self-hosted", "enclave-large", "odf"]') || fromJSON('["self-hosted", "enclave-large"]') }}
     timeout-minutes: ${{ github.event_name == 'schedule' && 600 || 360 }}
 
     strategy:
```
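The net effect is that the decision step now emits two arrays, one per mode. A condensed sketch of how the two job matrices consume them (the connected side is shown in the hunks above; the disconnected side reading `storage_plugins` is implied by this commit rather than visible in these hunks):

```yaml
e2e-connected:
  strategy:
    matrix:
      # Connected never runs ODF; the decision step substitutes lvms.
      storage-plugin: ${{ fromJSON(needs.check-e2e-needed.outputs.storage_plugins_connected) }}

e2e-disconnected:
  strategy:
    matrix:
      # Disconnected runs whatever was selected, including odf.
      storage-plugin: ${{ fromJSON(needs.check-e2e-needed.outputs.storage_plugins) }}
```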

.github/workflows/e2e-odf-schedule.yml

Lines changed: 2 additions & 2 deletions

```diff
@@ -23,8 +23,8 @@ jobs:
         run: |
           gh workflow run e2e-deployment.yml \
             --ref "${{ github.ref }}" \
-            -f run-connected=true \
+            -f run-connected=false \
             -f run-disconnected=true \
             -f storage-plugin=odf \
             -R "${{ github.repository }}"
-          echo "Dispatched E2E Deployment with ODF storage plugin" | tee -a $GITHUB_STEP_SUMMARY
+          echo "Dispatched E2E Disconnected with ODF storage plugin" | tee -a $GITHUB_STEP_SUMMARY
```

.github/workflows/slash-command.yml

Lines changed: 0 additions & 9 deletions

```diff
@@ -100,7 +100,6 @@ jobs:
           | `/test tarball` | Build and push tarball | pr-validation |
           | `/test infra` | Infrastructure verification | enclave-small |
           | `/test e2e-connected` | Dispatch E2E (connected only) | enclave-large |
-          | `/test e2e-connected-odf` | Dispatch E2E connected with ODF storage | enclave-large |
           | `/test e2e-disconnected` | Dispatch E2E (disconnected only) | enclave-large |
           | `/test e2e-disconnected-odf` | Dispatch E2E disconnected with ODF storage | enclave-large |
           | `/test cleanup` | Run cleanup workflow | enclave-small |
@@ -211,13 +210,6 @@ jobs:
                 -f run-disconnected=false
               track_result "E2E Connected" $?
               ;;
-            e2e-connected-odf)
-              dispatch_workflow e2e-deployment.yml \
-                -f run-connected=true \
-                -f run-disconnected=false \
-                -f storage-plugin=odf
-              track_result "E2E Connected ODF" $?
-              ;;
             e2e-disconnected)
               dispatch_workflow e2e-deployment.yml \
                 -f run-connected=false \
@@ -339,7 +331,6 @@ jobs:
             tarball) echo "build-push-tarball.yml" ;;
             infra) echo "infra-verify.yml" ;;
             e2e-connected) echo "e2e-deployment.yml" ;;
-            e2e-connected-odf) echo "e2e-deployment.yml" ;;
             e2e-disconnected) echo "e2e-deployment.yml" ;;
             e2e-disconnected-odf) echo "e2e-deployment.yml" ;;
             cleanup) echo "cleanup.yml" ;;
```

docs/ODF_CEPH_CI.md

Lines changed: 5 additions & 6 deletions

````diff
@@ -6,7 +6,7 @@ This document describes the containerized Ceph cluster used to provide external
 
 ODF in external mode connects to a pre-existing Ceph cluster rather than deploying its own. For CI, we run a single-node Ceph cluster on the Landing Zone VM using cephadm. All Ceph daemons run as podman containers on the LZ, which shares the same libvirt network as the OpenShift nodes.
 
-```
+```text
 CI Runner Machine (runs: [self-hosted, enclave-large])
 ├── libvirt VMs
 │   ├── Landing Zone VM (192.168.X.2)
@@ -32,7 +32,7 @@ CI Runner Machine (runs: [self-hosted, enclave-large])
 
 Cephadm filters out raw loop devices, so each OSD uses an LVM stack:
 
-```
+```text
 Sparse file (20GB)    Loop device         LVM
 osd-0.img ──────────> /dev/loop0 ──────> ceph-vg0/ceph-lv0 ──> OSD.0
 osd-1.img ──────────> /dev/loop1 ──────> ceph-vg1/ceph-lv1 ──> OSD.1
@@ -69,15 +69,15 @@ osd-2.img ──────────> /dev/loop2 ──────> cep
 
 Ceph runs on the Landing Zone VM, which is on the same libvirt cluster network as the OpenShift master nodes. All communication is direct:
 
-```
+```text
 Master node (192.168.X.10) -> Landing Zone (192.168.X.2:9283/7480) -- same L2 network
 ```
 
 No firewall configuration is needed. No gateway routing. No SDN workarounds.
 
 ## How ODF Config Flows to the Cluster
 
-```
+```text
 setup_ceph.sh runs on LZ (via SSH from CI runner)
   ↓ writes files to ~/ceph-config/
   ├── odf_external_config.json
@@ -104,7 +104,7 @@ The `ODF_EXTERNAL_CONFIG` and `QUAY_BACKEND_RGW_CONFIG` environment variables ar
 
 ### Runner Labels
 
-ODF runs use the same runner labels as LVMS: `[self-hosted, enclave-large]`. No special `odf` runner label is required since Ceph is deployed dynamically on the Landing Zone.
+ODF runs use the runner labels `[self-hosted, enclave-large, odf]`. The `odf` label ensures ODF jobs are routed to runners with sufficient disk space for Ceph loopback OSDs.
 
 ### Ceph Setup Step
 
@@ -121,7 +121,6 @@ This step is conditional on `STORAGE_PLUGIN == 'odf'` and is skipped for LVMS ru
 
 | Command | Description |
 |---------|-------------|
-| `/test e2e-connected-odf` | Connected mode E2E with ODF storage |
 | `/test e2e-disconnected-odf` | Disconnected mode E2E with ODF storage |
 
 ## Setup
````
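The doc's config-flow diagram implies a loader on the deploy side, which this commit gates behind the ODF plugin check (see the deploy_*.sh hunks below). The actual `scripts/lib/odf.sh` is not part of this diff; a hypothetical sketch of what `append_odf_extra_vars` plausibly does, following the env-var-first, LZ-file-fallback order the doc describes (the `odfExternalConfig` key name is invented for illustration):

```bash
# Sketch only -- the real odf.sh is not shown in this commit.
append_odf_extra_vars() {
  local odf_config="${ODF_EXTERNAL_CONFIG:-}"
  if [ -z "$odf_config" ]; then
    # Fall back to the file setup_ceph.sh wrote on the Landing Zone.
    odf_config=$(ssh_exec "cat ~/ceph-config/odf_external_config.json" 2>/dev/null || true)
  fi
  if [ -n "$odf_config" ]; then
    EXTRA_VARS_CONTENT+=$'\n'"odfExternalConfig: '${odf_config}'"
  fi
}
```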

scripts/deployment/deploy_cluster.sh

Lines changed: 4 additions & 2 deletions

```diff
@@ -192,8 +192,10 @@ if [ -n "${ENABLED_PLUGINS:-}" ]; then
 fi
 
 # Load ODF external config and Quay RGW config (from env vars or LZ files)
-source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
-append_odf_extra_vars
+if [ "${STORAGE_PLUGIN:-}" = "odf" ]; then
+  source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
+  append_odf_extra_vars
+fi
 
 # Create the extra vars file on Landing Zone
 ssh_exec "mkdir -p $LZ_ENCLAVE_DIR/config"
```

scripts/deployment/deploy_phase.sh

Lines changed: 4 additions & 2 deletions

```diff
@@ -124,8 +124,10 @@ if [ "${ENCLAVE_MIRROR_DRY_RUN:-}" = "true" ]; then
 fi
 
 # Load ODF external config and Quay RGW config (from env vars or LZ files)
-source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
-append_odf_extra_vars
+if [ "${STORAGE_PLUGIN:-}" = "odf" ]; then
+  source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
+  append_odf_extra_vars
+fi
 
 # Create the extra vars file on Landing Zone
 # shellcheck disable=SC2087,SC2086 # We want client-side expansion of $EXTRA_VARS_CONTENT
```

scripts/deployment/deploy_plugin.sh

Lines changed: 4 additions & 2 deletions

```diff
@@ -132,8 +132,10 @@ if [ -n "${ENABLED_PLUGINS:-}" ]; then
 fi
 
 # Load ODF external config and Quay RGW config (from env vars or LZ files)
-source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
-append_odf_extra_vars
+if [ "${STORAGE_PLUGIN:-}" = "odf" ]; then
+  source "${ENCLAVE_DIR}/scripts/lib/odf.sh"
+  append_odf_extra_vars
+fi
 
 # Create the extra vars file on Landing Zone
 # shellcheck disable=SC2087,SC2086 # We want client-side expansion of $EXTRA_VARS_CONTENT
```

scripts/infrastructure/generate_enclave_vars.sh

Lines changed: 7 additions & 2 deletions

```diff
@@ -321,10 +321,15 @@ echo ""
 # Calculate worker IP range for informational message
 WORKER_IP_START=$(echo "$CLUSTER_NETWORK" | awk -F. '{print $1"."$2"."$3".20"}')
 WORKER_IP_END=$(echo "$CLUSTER_NETWORK" | awk -F. -v count=$MASTER_COUNT '{print $1"."$2"."$3"."20+count-1}')
+ACTIVE_STORAGE="${STORAGE_PLUGIN:-lvms}"
+ACTIVE_REGISTRY="LocalStorage"
+if [ "$ACTIVE_STORAGE" = "odf" ]; then
+  ACTIVE_REGISTRY="RadosGWStorage"
+fi
 info "Generated configuration uses:"
 info "  - Worker IPs: ${WORKER_IP_START}-${WORKER_IP_END} (will be assigned during deployment)"
-info "  - Storage: LVMS with /dev/vda root disk"
-info "  - Registry: LocalStorage"
+info "  - Storage: ${ACTIVE_STORAGE} with /dev/vda root disk"
+info "  - Registry: ${ACTIVE_REGISTRY}"
 info "  - Pull secret: Embedded in config/global.yaml (written to pullSecretPath at runtime)"
 info "  - SSL certificates: Self-signed (generated for CI/testing)"
 echo ""
```

scripts/infrastructure/setup_ceph.sh

Lines changed: 62 additions & 41 deletions

```diff
@@ -137,10 +137,15 @@ info "Step 1: Installing cephadm and ceph-common (release: $CEPH_RELEASE)..."
 if command -v cephadm &>/dev/null; then
   info "cephadm already installed"
 else
-  # Download cephadm directly from the Ceph project
-  info "Downloading cephadm from Ceph project..."
-  curl --silent --remote-name --location \
-    "https://raw.githubusercontent.com/ceph/ceph/${CEPH_RELEASE}/src/cephadm/cephadm.py"
+  # Download cephadm from the official Ceph download server
+  info "Downloading cephadm from download.ceph.com..."
+  curl --silent --location -o cephadm.py \
+    "https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm" || {
+    # Fallback to GitHub if the official server doesn't have this release
+    warn "Official download server failed, falling back to GitHub..."
+    curl --silent --remote-name --location \
+      "https://raw.githubusercontent.com/ceph/ceph/${CEPH_RELEASE}/src/cephadm/cephadm.py"
+  }
 
   # Try to add repo and install via package manager
   if python3 cephadm.py add-repo --release "$CEPH_RELEASE" 2>/dev/null; then
@@ -320,8 +325,11 @@ wait_for_rgw
 
 # Enable MGR prometheus module for monitoring endpoint (port 9283, required by ODF)
 info "Enabling MGR prometheus module for monitoring metrics..."
-cephadm shell -- ceph mgr module enable prometheus 2>/dev/null || true
-success "MGR prometheus module enabled (port 9283)"
+if cephadm shell -- ceph mgr module enable prometheus 2>/dev/null; then
+  success "MGR prometheus module enabled (port 9283)"
+else
+  warn "MGR prometheus module enable failed - monitoring metrics may be unavailable"
+fi
 
 # Step 8: Create S3 user and bucket
 info "Step 8: Creating S3 user and bucket..."
@@ -345,21 +353,28 @@ S3_SECRET_KEY=$(cephadm shell -- radosgw-admin user info --uid="$S3_USER" 2>/dev
 if cephadm shell -- radosgw-admin bucket stats --bucket="$S3_BUCKET" &>/dev/null; then
   info "Bucket '$S3_BUCKET' already exists"
 else
+  S3_ACCESS_KEY="$S3_ACCESS_KEY" \
+  S3_SECRET_KEY="$S3_SECRET_KEY" \
+  CEPH_HOST_IP="$CEPH_HOST_IP" \
+  RGW_PORT="$RGW_PORT" \
+  S3_BUCKET="$S3_BUCKET" \
   python3 -c "
-import urllib.request, hmac, hashlib, base64, datetime
-host = '${CEPH_HOST_IP}:${RGW_PORT}'
-bucket = '${S3_BUCKET}'
+import os, urllib.request, hmac, hashlib, base64, datetime
+host = os.environ['CEPH_HOST_IP'] + ':' + os.environ['RGW_PORT']
+bucket = os.environ['S3_BUCKET']
+access_key = os.environ['S3_ACCESS_KEY']
+secret_key = os.environ['S3_SECRET_KEY']
 date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S +0000')
 string_to_sign = 'PUT\n\n\n' + date + '\n/' + bucket
 sig = base64.b64encode(hmac.new(
-    '${S3_SECRET_KEY}'.encode(), string_to_sign.encode(), hashlib.sha1
+    secret_key.encode(), string_to_sign.encode(), hashlib.sha1
 ).digest()).decode()
 req = urllib.request.Request(
     'http://' + host + '/' + bucket,
     method='PUT',
     headers={
         'Date': date,
-        'Authorization': 'AWS ${S3_ACCESS_KEY}:' + sig,
+        'Authorization': 'AWS ' + access_key + ':' + sig,
     }
 )
 urllib.request.urlopen(req)
@@ -434,16 +449,26 @@ print(','.join(addrs))
 # Note: rook-csi-rbd-node and rook-csi-rbd-provisioner are NOT included here.
 # Rook creates these secrets itself using the admin credentials. Including them
 # causes a type conflict: ODF creates them as Opaque, rook expects kubernetes.io/rook.
-ODF_EXTERNAL_CONFIG=$(python3 -c "
-import json
+ODF_EXTERNAL_CONFIG=$(
+  FSID="$FSID" \
+  MON_HOSTS="$MON_HOSTS" \
+  ADMIN_KEY="$ADMIN_KEY" \
+  CEPH_HOST_IP="$CEPH_HOST_IP" \
+  RGW_PORT="$RGW_PORT" \
+  RBD_POOL="$RBD_POOL" \
+  RGW_ACCESS_KEY="$RGW_ACCESS_KEY" \
+  RGW_SECRET_KEY="$RGW_SECRET_KEY" \
+  python3 -c "
+import os, json
+e = os.environ
 data = [
-    {'name': 'rook-ceph-mon-endpoints', 'kind': 'ConfigMap', 'data': {'data': '${FSID}=${MON_HOSTS}', 'maxMonId': '0', 'mapping': '{}'}},
-    {'name': 'rook-ceph-mon', 'kind': 'Secret', 'data': {'admin-secret': 'admin-secret', 'fsid': '${FSID}', 'mon-secret': 'mon-secret'}},
-    {'name': 'rook-ceph-operator-creds', 'kind': 'Secret', 'data': {'userID': 'client.admin', 'userKey': '${ADMIN_KEY}'}},
-    {'name': 'monitoring-endpoint', 'kind': 'CephCluster', 'data': {'MonitoringEndpoint': '${CEPH_HOST_IP}', 'MonitoringPort': '9283'}},
-    {'name': 'ceph-rbd', 'kind': 'StorageClass', 'data': {'pool': '${RBD_POOL}'}},
-    {'name': 'ceph-rgw', 'kind': 'StorageClass', 'data': {'endpoint': '${CEPH_HOST_IP}:${RGW_PORT}', 'poolPrefix': 'default'}},
-    {'name': 'rgw-admin-ops-user', 'kind': 'Secret', 'data': {'accessKey': '${RGW_ACCESS_KEY}', 'secretKey': '${RGW_SECRET_KEY}'}}
+    {'name': 'rook-ceph-mon-endpoints', 'kind': 'ConfigMap', 'data': {'data': e['FSID']+'='+e['MON_HOSTS'], 'maxMonId': '0', 'mapping': '{}'}},
+    {'name': 'rook-ceph-mon', 'kind': 'Secret', 'data': {'admin-secret': 'admin-secret', 'fsid': e['FSID'], 'mon-secret': 'mon-secret'}},
+    {'name': 'rook-ceph-operator-creds', 'kind': 'Secret', 'data': {'userID': 'client.admin', 'userKey': e['ADMIN_KEY']}},
+    {'name': 'monitoring-endpoint', 'kind': 'CephCluster', 'data': {'MonitoringEndpoint': e['CEPH_HOST_IP'], 'MonitoringPort': '9283'}},
+    {'name': 'ceph-rbd', 'kind': 'StorageClass', 'data': {'pool': e['RBD_POOL']}},
+    {'name': 'ceph-rgw', 'kind': 'StorageClass', 'data': {'endpoint': e['CEPH_HOST_IP']+':'+e['RGW_PORT'], 'poolPrefix': 'default'}},
+    {'name': 'rgw-admin-ops-user', 'kind': 'Secret', 'data': {'accessKey': e['RGW_ACCESS_KEY'], 'secretKey': e['RGW_SECRET_KEY']}}
 ]
 print(json.dumps(data))
 ")
@@ -522,7 +547,7 @@ else
 fi
 
 # ============================================================================
-# Output GitHub Secrets
+# Output Summary (credentials redacted)
 # ============================================================================
 echo ""
 echo "============================================================================"
@@ -531,28 +556,24 @@ echo "==========================================================================
 echo ""
 echo "Ceph cluster is running on $CEPH_HOST_IP"
 echo ""
-echo "--- GitHub Actions Variable ---"
-echo ""
-echo "  CEPH_HOST_IP=$CEPH_HOST_IP"
-echo ""
-echo "--- GitHub Actions Secrets ---"
-echo ""
-echo "Set the following as repository secrets:"
-echo ""
-echo "1) ODF_EXTERNAL_CONFIG:"
-echo ""
-echo "$ODF_EXTERNAL_CONFIG"
-echo ""
-echo "2) QUAY_BACKEND_RGW_CONFIG:"
+echo "  FSID:      $(cephadm shell -- ceph fsid 2>/dev/null || echo 'unknown')"
+echo "  MON:       $CEPH_HOST_IP:3300,6789"
+echo "  RGW:       http://$CEPH_HOST_IP:$RGW_PORT"
+echo "  Metrics:   http://$CEPH_HOST_IP:9283"
+echo "  RBD pool:  $RBD_POOL"
+echo "  S3 bucket: $S3_BUCKET"
+echo "  S3 user:   $S3_USER"
 echo ""
-
-# Build YAML object for Quay RGW configuration
-cat <<EOF
-{access_key: ${S3_ACCESS_KEY}, secret_key: ${S3_SECRET_KEY}, bucket_name: ${S3_BUCKET}, hostname: ${CEPH_HOST_IP}, port: ${RGW_PORT}, is_secure: false}
-EOF
-
+if [ -n "$CEPH_CONFIG_DIR" ]; then
+  echo "Config files: $CEPH_CONFIG_DIR/"
+  echo "  - odf_external_config.json"
+  echo "  - quay_backend_rgw_config.yaml"
+else
+  echo "Credentials (set as GitHub secrets for manual use):"
+  echo "  ODF_EXTERNAL_CONFIG: <use CEPH_CONFIG_DIR to write to file>"
+  echo "  QUAY_BACKEND_RGW_CONFIG: <use CEPH_CONFIG_DIR to write to file>"
+fi
 echo ""
-echo "============================================================================"
 echo "To verify:"
 echo "  ceph health"
 echo "  curl http://${CEPH_HOST_IP}:${RGW_PORT}/"
```

scripts/setup/configure_devscripts.sh

Lines changed: 5 additions & 2 deletions

```diff
@@ -84,13 +84,16 @@ else
   MASTER_MEMORY_VAL=32768  # 32 GB for disconnected
 fi
 
-# ODF requires additional resources for Ceph/Rook operators, noobaa, and CSI daemons
+# ODF requires additional resources for Ceph/Rook operators, noobaa, and CSI daemons.
+# With ODF, storage is provided by Ceph on the LZ -- not local LVMS disks on masters --
+# so masters need smaller extra disks while the LZ needs a larger disk for Ceph OSDs.
 STORAGE_PLUGIN="${STORAGE_PLUGIN:-lvms}"
 MASTER_VCPU_VAL=12
 LANDINGZONE_DISK_VAL=60
 if [ "$STORAGE_PLUGIN" = "odf" ]; then
   MASTER_VCPU_VAL=16  # 16 vCPUs (up from 12)
-  MASTER_MEMORY_VAL=$((MASTER_MEMORY_VAL + 8192))  # +8 GB RAM
+  MASTER_MEMORY_VAL=$((MASTER_MEMORY_VAL + 16384))  # +16 GB RAM (ODF/rook/noobaa + Quay)
+  VM_EXTRADISKS_SIZE_VAL="60G"  # Masters don't need large LVMS disks with ODF
   LANDINGZONE_DISK_VAL=500  # Ceph OSDs store mirrored images on the LZ disk
 fi
 
```
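For concreteness, the sizing a disconnected ODF run ends up with under these values (worked arithmetic, not new configuration):

```bash
# Disconnected base memory is 32768 MB; ODF adds 16384 MB.
echo "$((32768 + 16384)) MB"   # 49152 MB = 48 GB per master
# Alongside: 16 vCPUs per master (up from 12), a 60G master extra disk,
# and a 500 GB Landing Zone disk for Ceph OSDs and mirrored images.
```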
