|
6 | 6 | # 1. Namespace watch (Harvester) |
7 | 7 | # Watches tenant namespaces on the Harvester cluster. For each new namespace |
8 | 8 | # that belongs to a Rancher project it creates: |
| 9 | +# |
| 10 | +# Cloud-provider credentials (for RKE2 Harvester cloud provider): |
9 | 11 | # - ServiceAccount harvester-cloud-provider-<ns> |
10 | 12 | # - RoleBinding to harvesterhci.io:cloudprovider in the tenant namespace |
11 | 13 | # - Optional RoleBinding to view in the project's network namespace |
12 | 14 | # - Long-lived SA token secret |
| 15 | +# |
| 16 | +# Consumer VM-access credentials (for tenant teams provisioning VMs/clusters): |
| 17 | +# - ServiceAccount harvester-vm-access-<ns> |
| 18 | +# - RoleBinding to harvesterhci.io:edit in the tenant namespace |
| 19 | +# - RoleBinding to edit (k8s built-in) in the tenant namespace |
| 20 | +# - RoleBindings to view in default and harvester-public (for shared OS images) |
| 21 | +# - Long-lived SA token secret |
| 22 | +# - Secret "harvester-vm-kubeconfig" in the tenant namespace containing a |
| 23 | +# namespace-scoped kubeconfig. The platform team retrieves this once at |
| 24 | +# onboarding and hands it to the tenant team. |
| 25 | +# |
13 | 26 | # On namespace deletion it deletes any harvesterconfig-* secrets on Rancher |
14 | | -# whose kubeconfig was built from that namespace's SA token. |
| 27 | +# whose kubeconfig was built from that namespace's SA token, and cleans up |
| 28 | +# the default and harvester-public view RoleBindings for the VM-access SA. |
15 | 29 | # |
16 | 30 | # 2. Cluster watch (Rancher) |
17 | 31 | # Watches clusters.provisioning.cattle.io on the Rancher cluster. For each |
|
22 | 36 | # v2prov-secret-authorized-for-cluster already set at creation time |
23 | 37 | # On cluster deletion it removes harvesterconfig-<cluster-name>. |
24 | 38 | # |
25 | | -# Consumers (tenant teams) only need the rancher2 provider. No Harvester or |
26 | | -# Rancher kubeconfig required on their side. |
27 | | -# |
28 | 39 | # Environment variables (injected by the Deployment): |
29 | 40 | # HARVESTER_API_SERVER — Harvester Kubernetes API server URL |
30 | 41 | # RANCHER_KUBECONFIG — Path to kubeconfig for Rancher's local cluster |
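
For context, the one-time hand-off described in the header comment above (the platform team pulling `harvester-vm-kubeconfig` and giving it to the tenant team) could look roughly like this; the namespace name and output filename are illustrative placeholders, not values from the script:

```bash
# Platform team, once per tenant namespace at onboarding: extract the generated
# kubeconfig from the well-known secret and hand the file to the tenant team.
tenant_ns="team-a"   # illustrative
kubectl get secret harvester-vm-kubeconfig -n "$tenant_ns" \
  -o jsonpath='{.data.kubeconfig}' | base64 -d > "${tenant_ns}-harvester.kubeconfig"
```
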
@@ -131,6 +142,114 @@ $(echo "$kubeconfig" | sed 's/^/ /') |
131 | 142 | EOF |
132 | 143 | } |
133 | 144 |
|
 | 145 | +# Build a namespace-scoped VM-access kubeconfig for tenant teams and write it |
 | 146 | +# as the well-known "harvester-vm-kubeconfig" Secret in the tenant namespace. |
 | 147 | +# Consumers retrieve this secret once at onboarding to provision VMs and RKE2 |
 | 148 | +# clusters using the workloads/vm and workloads/k8s-cluster OCD modules. |
 | 149 | +# Args: ns |
 | 150 | +write_vm_kubeconfig() { |
 | 151 | +  local ns="$1" |
 | 152 | +  local sa_name="harvester-vm-access-${ns}" |
 | 153 | +  local secret_name="harvester-vm-kubeconfig" |
 | 154 | + |
 | 155 | +  # ServiceAccount in tenant namespace. |
 | 156 | +  kubectl create serviceaccount "$sa_name" -n "$ns" \ |
 | 157 | +    --dry-run=client -o yaml | kubectl apply -f - |
 | 158 | + |
 | 159 | +  # RoleBinding — Harvester VM lifecycle (VMs, keypairs, images, NADs, backups). |
 | 160 | +  kubectl create rolebinding "${sa_name}" \ |
 | 161 | +    --clusterrole=harvesterhci.io:edit \ |
 | 162 | +    --serviceaccount="${ns}:${sa_name}" \ |
 | 163 | +    -n "$ns" --dry-run=client -o yaml | kubectl apply -f - |
 | 164 | + |
 | 165 | +  # RoleBinding — Kubernetes resource edit (Secrets, PVCs, ConfigMaps). |
 | 166 | +  kubectl create rolebinding "${sa_name}-k8s-edit" \ |
 | 167 | +    --clusterrole=edit \ |
 | 168 | +    --serviceaccount="${ns}:${sa_name}" \ |
 | 169 | +    -n "$ns" --dry-run=client -o yaml | kubectl apply -f - |
 | 170 | + |
 | 171 | +  # RoleBinding — read shared OS images in default namespace. |
 | 172 | +  kubectl create rolebinding "${ns}-${sa_name}-default-view" \ |
 | 173 | +    --clusterrole=view \ |
 | 174 | +    --serviceaccount="${ns}:${sa_name}" \ |
 | 175 | +    -n "default" --dry-run=client -o yaml | kubectl apply -f - |
 | 176 | + |
 | 177 | +  # RoleBinding — read shared OS images in harvester-public. |
 | 178 | +  kubectl create rolebinding "${ns}-${sa_name}-public-view" \ |
 | 179 | +    --clusterrole=view \ |
 | 180 | +    --serviceaccount="${ns}:${sa_name}" \ |
 | 181 | +    -n "harvester-public" --dry-run=client -o yaml | kubectl apply -f - |
 | 182 | + |
 | 183 | +  # Long-lived token secret. |
 | 184 | +  kubectl apply -f - <<EOF |
 | 185 | +apiVersion: v1 |
 | 186 | +kind: Secret |
 | 187 | +metadata: |
 | 188 | +  name: ${sa_name}-token |
 | 189 | +  namespace: ${ns} |
 | 190 | +  annotations: |
 | 191 | +    kubernetes.io/service-account.name: ${sa_name} |
 | 192 | +type: kubernetes.io/service-account-token |
 | 193 | +EOF |
 | 194 | + |
 | 195 | +  # Wait for the token to be populated by the token controller. |
 | 196 | +  local token="" |
 | 197 | +  for _ in $(seq 1 20); do |
 | 198 | +    token=$(kubectl get secret "${sa_name}-token" -n "$ns" \ |
 | 199 | +      -o jsonpath='{.data.token}' 2>/dev/null || true) |
 | 200 | +    [[ -n "$token" ]] && break |
 | 201 | +    sleep 1 |
 | 202 | +  done |
 | 203 | +  if [[ -z "$token" ]]; then |
 | 204 | +    log " ERROR: VM access token not populated for ${sa_name} in ${ns}" |
 | 205 | +    return 1 |
 | 206 | +  fi |
 | 207 | + |
 | 208 | +  local token_decoded ca_cert_b64 |
 | 209 | +  token_decoded=$(echo "$token" | base64 -d) |
 | 210 | +  ca_cert_b64=$(kubectl get configmap kube-root-ca.crt -n kube-system \ |
 | 211 | +    -o jsonpath='{.data.ca\.crt}' | base64 | tr -d '\n') |
 | 212 | + |
 | 213 | +  local kubeconfig |
 | 214 | +  kubeconfig=$(cat <<EOF |
 | 215 | +apiVersion: v1 |
 | 216 | +kind: Config |
 | 217 | +clusters: |
 | 218 | +- name: harvester |
 | 219 | +  cluster: |
 | 220 | +    certificate-authority-data: ${ca_cert_b64} |
 | 221 | +    server: ${HARVESTER_API_SERVER} |
 | 222 | +users: |
 | 223 | +- name: ${ns} |
 | 224 | +  user: |
 | 225 | +    token: ${token_decoded} |
 | 226 | +contexts: |
 | 227 | +- name: ${ns}@harvester |
 | 228 | +  context: |
 | 229 | +    cluster: harvester |
 | 230 | +    namespace: ${ns} |
 | 231 | +    user: ${ns} |
 | 232 | +current-context: ${ns}@harvester |
 | 233 | +EOF |
 | 234 | +) |
 | 235 | + |
 | 236 | +  kubectl apply -f - <<EOF |
 | 237 | +apiVersion: v1 |
 | 238 | +kind: Secret |
 | 239 | +metadata: |
 | 240 | +  name: ${secret_name} |
 | 241 | +  namespace: ${ns} |
 | 242 | +  annotations: |
 | 243 | +    platform.wso2.com/vm-access-sa: "${sa_name}" |
 | 244 | +type: Opaque |
 | 245 | +stringData: |
 | 246 | +  kubeconfig: | |
 | 247 | +$(echo "$kubeconfig" | sed 's/^/    /') |
 | 248 | +EOF |
 | 249 | + |
 | 250 | +  log " [ns] VM access kubeconfig ready: ${secret_name} in ${ns}" |
 | 251 | +} |
| 252 | + |
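
One way to sanity-check the kubeconfig this function produces is `kubectl auth can-i` against the tenant namespace; a sketch, assuming the file was extracted as in the earlier onboarding example, with the expected answers following from the RoleBindings created above (resource names use Harvester's KubeVirt and harvesterhci.io APIs):

```bash
tenant_ns="team-a"                             # illustrative
kcfg="${tenant_ns}-harvester.kubeconfig"       # from the onboarding extraction step

# harvesterhci.io:edit + edit inside the tenant namespace: VM lifecycle should work.
kubectl --kubeconfig "$kcfg" auth can-i create virtualmachines.kubevirt.io -n "$tenant_ns"              # expect "yes"
# Only view in harvester-public: shared images are readable but not writable.
kubectl --kubeconfig "$kcfg" auth can-i create virtualmachineimages.harvesterhci.io -n harvester-public # expect "no"
# Nothing cluster-scoped was granted.
kubectl --kubeconfig "$kcfg" auth can-i list namespaces                                                 # expect "no"
```
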
134 | 253 | # ── Namespace watch handlers ─────────────────────────────────────────────────── |
135 | 254 |
|
136 | 255 | on_added_namespace() { |
@@ -179,6 +298,11 @@ type: kubernetes.io/service-account-token |
179 | 298 | EOF |
180 | 299 |
|
181 | 300 | log " [ns] SA ready: ${sa_name} in ${ns}" |
 | 301 | + |
 | 302 | +  # Consumer VM-access kubeconfig — separate SA with broader permissions. |
 | 303 | +  # Explicit return propagates failure to the caller so the namespace is NOT |
 | 304 | +  # marked processed; the watch loop will retry on the next event. |
 | 305 | +  write_vm_kubeconfig "$ns" || return 1 |
182 | 306 | } |
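
The watch loop that consumes this handler is outside this hunk; the retry behaviour referred to in the new comment assumes the caller only marks a namespace processed when the handler exits zero, mirroring the INIT loop at the bottom of the script. An assumed sketch of that contract:

```bash
# Assumed caller pattern (the real watch loop is not part of this diff):
# a failed write_vm_kubeconfig makes on_added_namespace return non-zero, the
# namespace is not recorded, and the next ADDED/MODIFIED event retries it.
if ! grep -qxF "$ns" "$PROCESSED_NS_FILE" 2>/dev/null; then
  if on_added_namespace "$ns" "$project_id"; then
    echo "$ns" >> "$PROCESSED_NS_FILE"
  fi
fi
```
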
183 | 307 |
|
184 | 308 | on_deleted_namespace() { |
@@ -216,6 +340,14 @@ on_deleted_namespace() { |
216 | 340 | kubectl delete rolebinding "$rb_name_found" -n "$rb_ns" 2>/dev/null \ |
217 | 341 | && log " [ns] deleted rolebinding ${rb_name_found} from ${rb_ns}" |
218 | 342 | done || true |
 | 343 | + |
 | 344 | +  # Delete the VM-access SA's cross-namespace RoleBindings. |
 | 345 | +  # (Resources inside the deleted namespace are cleaned up by Kubernetes.) |
 | 346 | +  local vm_sa_name="harvester-vm-access-${ns}" |
 | 347 | +  kubectl delete rolebinding "${ns}-${vm_sa_name}-default-view" -n "default" \ |
 | 348 | +    2>/dev/null && log " [ns] deleted default RoleBinding for ${vm_sa_name}" || true |
 | 349 | +  kubectl delete rolebinding "${ns}-${vm_sa_name}-public-view" -n "harvester-public" \ |
 | 350 | +    2>/dev/null && log " [ns] deleted harvester-public RoleBinding for ${vm_sa_name}" || true |
219 | 351 | } |
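
Since these two RoleBindings are the only VM-access objects that live outside the tenant namespace, leftovers from a missed DELETE event are easy to audit. An illustrative check built on the naming convention used above:

```bash
# List cross-namespace view RoleBindings created for VM-access ServiceAccounts,
# together with the tenant namespace each one points back to.
for shared_ns in default harvester-public; do
  kubectl get rolebindings -n "$shared_ns" -o json \
    | jq -r '.items[]
             | select(.metadata.name | test("-harvester-vm-access-.+-(default|public)-view$"))
             | "\(.metadata.namespace)/\(.metadata.name) -> \(.subjects[0].namespace)"'
done
```
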
220 | 352 |
|
221 | 353 | # ── Cluster watch handlers ───────────────────────────────────────────────────── |
@@ -408,9 +540,26 @@ kubectl get namespaces -o json | jq -r ' |
408 | 540 |   [[ -z "$project_id" ]] && continue |
409 | 541 |   is_system_namespace "$ns" && continue |
410 | 542 |   [[ "$role" == "network-namespace" ]] && continue |
411 | | -  log "INIT namespace: ${ns} (project: ${project_id})" |
412 | | -  if on_added_namespace "$ns" "$project_id"; then |
413 | | -    echo "$ns" >> "$PROCESSED_NS_FILE" |
 | 543 | +  sa_name="harvester-cloud-provider-${ns}" |
 | 544 | +  if kubectl get secret "${sa_name}-token" -n "$ns" &>/dev/null; then |
 | 545 | +    # Cloud-provider credentials already exist. Check for the VM-access kubeconfig |
 | 546 | +    # separately — may be absent on pods that ran before this feature was added. |
 | 547 | +    if kubectl get secret "harvester-vm-kubeconfig" -n "$ns" &>/dev/null; then |
 | 548 | +      log "INIT namespace: ${ns} — already provisioned, skipping" |
 | 549 | +      echo "$ns" >> "$PROCESSED_NS_FILE" |
 | 550 | +    else |
 | 551 | +      log "INIT namespace: ${ns} — backfilling VM access kubeconfig" |
 | 552 | +      if write_vm_kubeconfig "$ns"; then |
 | 553 | +        echo "$ns" >> "$PROCESSED_NS_FILE" |
 | 554 | +      else |
 | 555 | +        log " WARN: VM access kubeconfig backfill failed for ${ns} — will retry on next watch event" |
 | 556 | +      fi |
 | 557 | +    fi |
 | 558 | +  else |
 | 559 | +    log "INIT namespace: ${ns} (project: ${project_id})" |
 | 560 | +    if on_added_namespace "$ns" "$project_id"; then |
 | 561 | +      echo "$ns" >> "$PROCESSED_NS_FILE" |
 | 562 | +    fi |
414 | 563 |   fi |
415 | 564 | done |
416 | 565 |
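
On an operator pod that predates this change, the backfill branch above runs once per namespace during this INIT pass. A manual spot check for namespaces that are still missing the new secret, using the same secret names the script uses:

```bash
# Namespaces that already have cloud-provider credentials but no VM-access kubeconfig yet.
for ns in $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'); do
  kubectl get secret "harvester-cloud-provider-${ns}-token" -n "$ns" &>/dev/null || continue
  kubectl get secret harvester-vm-kubeconfig -n "$ns" &>/dev/null \
    || echo "needs backfill: ${ns}"
done
```
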
|
|