diff --git a/.github/workflows/e2e-tests.yaml b/.github/workflows/e2e-tests.yaml index 1fe3bf411..745d4beac 100644 --- a/.github/workflows/e2e-tests.yaml +++ b/.github/workflows/e2e-tests.yaml @@ -257,3 +257,93 @@ jobs: with: name: s3-tls-verify-artifacts path: artifacts + + e2e-tests-tls: + name: E2E TLS + runs-on: ubuntu-22.04-8core + timeout-minutes: 45 + needs: dev-image + env: + CLOUDSERVER_TAG: ${{ vars.CLOUDSERVER_RING_9_5 }} + steps: + - name: Check out repository + uses: actions/checkout@v5 + with: + ref: ${{ github.sha }} + fetch-depth: 0 + + - name: CI Setup + uses: ./.github/actions/ci-setup + with: + cluster_name: tls-e2e-cluster + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version-file: "tests/e2e/go.mod" + cache: true + + - name: Install Ginkgo CLI + run: go install github.com/onsi/ginkgo/v2/ginkgo@v2.25.2 + + - name: Pull Container Images and Download Go Dependencies in Parallel + run: | + CSI_IMAGE_REPOSITORY=ghcr.io/${{ github.repository }} \ + CSI_IMAGE_TAG=${{ github.sha }} \ + CLOUDSERVER_TAG=${CLOUDSERVER_TAG} \ + mage e2e:pullImages + + - name: Load CSI Driver into KIND + run: kind load docker-image ghcr.io/${{ github.repository }}:${{ github.sha }} --name tls-e2e-cluster + + - name: Deploy S3 with TLS + run: mage e2e:deployS3TLS + + - name: Get Host IP Address + id: get_ip + run: echo "host_ip=$(hostname -I | awk '{print $1}')" >> $GITHUB_OUTPUT + + # DNS must be configured at two levels for TLS hostname resolution: + # - /etc/hosts: so the CI runner (Ginkgo test binary) can reach s3.scality.com for bucket operations + # - CoreDNS: so pods inside the KIND cluster (mounter pods, init containers) can reach s3.scality.com + - name: Configure hosts file for S3 FQDN + run: | + echo "${{ steps.get_ip.outputs.host_ip }} s3.scality.com" | sudo tee -a /etc/hosts + + - name: Configure CoreDNS for S3 FQDN + run: S3_HOST_IP=${{ steps.get_ip.outputs.host_ip }} mage e2e:configureCIDNS + + - name: Start Kubernetes Event and Log Capture + 
run: mage e2e:startCapture + + - name: Apply CRDs + run: mage e2e:applyCRDs + + - name: Run TLS E2E Tests + run: | + mkdir -p test-results + S3_ENDPOINT_URL=https://s3.scality.com:8443 \ + CSI_IMAGE_TAG=${{ github.sha }} \ + CSI_IMAGE_REPOSITORY=ghcr.io/${{ github.repository }} \ + JUNIT_REPORT=./test-results/e2e-tls-tests-results.xml \ + mage e2e:tlsAll + + - name: Stop Capture and Collect Artifacts + if: always() + run: mage e2e:stopCapture + + - name: Upload Test Artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: e2e-tls-test-artifacts + path: artifacts + + - name: Upload test results to Codecov + if: ${{ always() }} + uses: codecov/test-results-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: ./tests/e2e/test-results/e2e-tls-tests-results.xml + flags: e2e_tls_tests + slug: scality/mountpoint-s3-csi-driver diff --git a/.markdownlint.yaml b/.markdownlint.yaml index d40757d66..3cb84f751 100644 --- a/.markdownlint.yaml +++ b/.markdownlint.yaml @@ -51,7 +51,7 @@ MD033: MD034: true # Code blocks & emphasis styling -MD046: { style: fenced } # Require triple‑backtick fences +MD046: false # Disabled — MkDocs Material admonitions use indented code blocks MD048: { style: backtick } # Use backticks (```) not tildes (~~~) MD049: { style: consistent } # Whatever emphasis marker (* or _) appears first must be used consistently MD050: { style: asterisk } # Use **asterisk** not __underscore__ for bold diff --git a/charts/scality-mountpoint-s3-csi-driver/templates/controller.yaml b/charts/scality-mountpoint-s3-csi-driver/templates/controller.yaml index 5160219a2..592d784f0 100644 --- a/charts/scality-mountpoint-s3-csi-driver/templates/controller.yaml +++ b/charts/scality-mountpoint-s3-csi-driver/templates/controller.yaml @@ -61,6 +61,11 @@ spec: volumeMounts: - name: socket-dir mountPath: /csi + {{- if .Values.tls.caCertConfigMap }} + - name: custom-ca-cert + mountPath: /etc/ssl/custom-ca + readOnly: true + {{- end }} {{- with 
.Values.controller.resources }} resources: {{- toYaml . | nindent 12 }} @@ -84,6 +89,10 @@ spec: secretKeyRef: name: {{ .Values.s3CredentialSecret.name }} key: {{ .Values.s3CredentialSecret.secretAccessKey }} + {{- if .Values.tls.caCertConfigMap }} + - name: AWS_CA_BUNDLE + value: "/etc/ssl/custom-ca/ca-bundle.crt" + {{- end }} # Reconciler for MountpointS3PodAttachment CRDs - name: s3-pod-reconciler image: {{ printf "%s%s:%s" (default "" .Values.image.containerRegistry) .Values.image.repository (default (printf "v%s" .Chart.AppVersion) (toString .Values.image.tag)) }} @@ -115,6 +124,20 @@ spec: value: {{ printf "%s:%s" .Values.mountpointPod.headroomImage.repository .Values.mountpointPod.headroomImage.tag | quote }} - name: MOUNTPOINT_IMAGE_PULL_POLICY value: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.tls.caCertConfigMap }} + - name: TLS_CA_CERT_CONFIGMAP + value: {{ .Values.tls.caCertConfigMap | quote }} + - name: TLS_INIT_IMAGE + value: {{ printf "%s:%s" .Values.tls.initImage.repository .Values.tls.initImage.tag | quote }} + - name: TLS_INIT_IMAGE_PULL_POLICY + value: {{ .Values.tls.initImage.pullPolicy | quote }} + - name: TLS_INIT_RESOURCES_REQUESTS_CPU + value: {{ .Values.tls.initResources.requests.cpu | quote }} + - name: TLS_INIT_RESOURCES_REQUESTS_MEMORY + value: {{ .Values.tls.initResources.requests.memory | quote }} + - name: TLS_INIT_RESOURCES_LIMITS_MEMORY + value: {{ .Values.tls.initResources.limits.memory | quote }} + {{- end }} - name: csi-provisioner image: {{ .Values.sidecars.csiProvisioner.image.repository }}:{{ .Values.sidecars.csiProvisioner.image.tag }} imagePullPolicy: {{ .Values.sidecars.csiProvisioner.image.pullPolicy }} @@ -127,3 +150,13 @@ spec: volumes: - name: socket-dir emptyDir: {} + {{- if .Values.tls.caCertConfigMap }} + # ConfigMap volume is NOT optional — if the ConfigMap doesn't exist, the pod stays in + # ContainerCreating with a clear event, matching the behavior of the credentials Secret above. 
+ - name: custom-ca-cert + configMap: + name: {{ .Values.tls.caCertConfigMap }} + items: + - key: ca-bundle.crt + path: ca-bundle.crt + {{- end }} diff --git a/charts/scality-mountpoint-s3-csi-driver/templates/tls-configmaps.yaml b/charts/scality-mountpoint-s3-csi-driver/templates/tls-configmaps.yaml new file mode 100644 index 000000000..3c77b9b7d --- /dev/null +++ b/charts/scality-mountpoint-s3-csi-driver/templates/tls-configmaps.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.tls.caCertConfigMap .Values.tls.caCertData }} +# Helm-managed CA certificate ConfigMap in the controller (release) namespace. +# Created automatically when tls.caCertData is provided via --set-file. +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.tls.caCertConfigMap }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "scality-mountpoint-s3-csi-driver.labels" . | nindent 4 }} +data: + ca-bundle.crt: | + {{- .Values.tls.caCertData | nindent 4 }} +{{- if ne .Release.Namespace .Values.mountpointPod.namespace }} +--- +# Helm-managed CA certificate ConfigMap in the mounter pod namespace. +# Mounter pods mount this ConfigMap to inject the CA into the system trust store. +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.tls.caCertConfigMap }} + namespace: {{ .Values.mountpointPod.namespace }} + labels: + {{- include "scality-mountpoint-s3-csi-driver.labels" . | nindent 4 }} +data: + ca-bundle.crt: | + {{- .Values.tls.caCertData | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/scality-mountpoint-s3-csi-driver/values.yaml b/charts/scality-mountpoint-s3-csi-driver/values.yaml index 08d20f155..a48df5337 100644 --- a/charts/scality-mountpoint-s3-csi-driver/values.yaml +++ b/charts/scality-mountpoint-s3-csi-driver/values.yaml @@ -140,6 +140,30 @@ mountpointPod: tag: "3.10" pullPolicy: IfNotPresent +# TLS configuration for custom CA certificates +tls: + # Name of the ConfigMap containing the CA certificate bundle (key: ca-bundle.crt). 
+ # Must exist in TWO namespaces: controller namespace and mountpointPod.namespace. + # When tls.caCertData is also set, Helm creates the ConfigMap in both namespaces automatically. + # When tls.caCertData is empty, you must create the ConfigMap manually in both namespaces. + # Leave empty to disable TLS CA certificate injection. + caCertConfigMap: "" + # PEM-encoded CA certificate content. When set together with caCertConfigMap, + # Helm creates the ConfigMap in both namespaces automatically (Helm-managed mode). + # Use: --set-file tls.caCertData=/path/to/your/ca.crt + # When empty, users must create the ConfigMap manually in both namespaces (manual mode). + caCertData: "" + initImage: + repository: alpine + tag: "3.21" + pullPolicy: IfNotPresent + initResources: + requests: + cpu: 10m + memory: 16Mi + limits: + memory: 64Mi + # CRD Cleanup Configuration # Enable automatic cleanup of MountpointS3PodAttachment CRDs and Mountpoint Pods during helm uninstall # Warning: This will forcefully terminate all active S3 mounts diff --git a/cmd/scality-csi-controller/main.go b/cmd/scality-csi-controller/main.go index 62cb7213d..b6e238d9a 100644 --- a/cmd/scality-csi-controller/main.go +++ b/cmd/scality-csi-controller/main.go @@ -7,7 +7,9 @@ import ( "flag" "os" + "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -34,6 +36,12 @@ var ( headroomImage = flag.String("headroom-image", os.Getenv("MOUNTPOINT_HEADROOM_IMAGE"), "Image of a pause container to use in spawned Headroom Pods.") mountpointImagePullPolicy = flag.String("mountpoint-image-pull-policy", os.Getenv("MOUNTPOINT_IMAGE_PULL_POLICY"), "Pull policy of Mountpoint images.") mountpointContainerCommand = flag.String("mountpoint-container-command", "/bin/scality-s3-csi-mounter", "Entrypoint command of the Mountpoint Pods.") + 
tlsCACertConfigMap = flag.String("tls-ca-cert-configmap", os.Getenv("TLS_CA_CERT_CONFIGMAP"), "Name of ConfigMap containing custom CA certificate(s).") + tlsInitImage = flag.String("tls-init-image", os.Getenv("TLS_INIT_IMAGE"), "Image for CA certificate installation initContainer.") + tlsInitImagePullPolicy = flag.String("tls-init-image-pull-policy", os.Getenv("TLS_INIT_IMAGE_PULL_POLICY"), "Pull policy for TLS init image.") + tlsInitResourcesReqCPU = flag.String("tls-init-resources-req-cpu", os.Getenv("TLS_INIT_RESOURCES_REQUESTS_CPU"), "CPU request for TLS init container.") + tlsInitResourcesReqMemory = flag.String("tls-init-resources-req-memory", os.Getenv("TLS_INIT_RESOURCES_REQUESTS_MEMORY"), "Memory request for TLS init container.") + tlsInitResourcesLimMemory = flag.String("tls-init-resources-lim-memory", os.Getenv("TLS_INIT_RESOURCES_LIMITS_MEMORY"), "Memory limit for TLS init container.") ) var scheme = runtime.NewScheme() @@ -79,6 +87,7 @@ func main() { }, CSIDriverVersion: version.GetVersion().DriverVersion, ClusterVariant: cluster.DetectVariant(conf, log), + TLS: buildTLSConfig(log), } // Setup the pod reconciler that will create MountpointS3PodAttachments @@ -105,3 +114,61 @@ func main() { os.Exit(1) } } + +// buildTLSConfig constructs a TLSConfig from flags/env vars. Returns nil if no ConfigMap name is set. 
+func buildTLSConfig(log logr.Logger) *mppod.TLSConfig { + if *tlsCACertConfigMap == "" { + return nil + } + + initImage := *tlsInitImage + if initImage == "" { + initImage = "alpine:3.21" + } + + pullPolicy := corev1.PullPolicy(*tlsInitImagePullPolicy) + if pullPolicy == "" { + pullPolicy = corev1.PullIfNotPresent + } + + reqCPU := resource.MustParse("10m") + if *tlsInitResourcesReqCPU != "" { + parsed, err := resource.ParseQuantity(*tlsInitResourcesReqCPU) + if err != nil { + log.Error(err, "invalid TLS init CPU request", "value", *tlsInitResourcesReqCPU) + os.Exit(1) + } + reqCPU = parsed + } + + reqMemory := resource.MustParse("16Mi") + if *tlsInitResourcesReqMemory != "" { + parsed, err := resource.ParseQuantity(*tlsInitResourcesReqMemory) + if err != nil { + log.Error(err, "invalid TLS init memory request", "value", *tlsInitResourcesReqMemory) + os.Exit(1) + } + reqMemory = parsed + } + + limMemory := resource.MustParse("64Mi") + if *tlsInitResourcesLimMemory != "" { + parsed, err := resource.ParseQuantity(*tlsInitResourcesLimMemory) + if err != nil { + log.Error(err, "invalid TLS init memory limit", "value", *tlsInitResourcesLimMemory) + os.Exit(1) + } + limMemory = parsed + } + + log.Info("TLS configuration enabled", "configmap", *tlsCACertConfigMap, "initImage", initImage) + + return &mppod.TLSConfig{ + CACertConfigMapName: *tlsCACertConfigMap, + InitImage: initImage, + InitImagePullPolicy: pullPolicy, + InitResourcesReqCPU: reqCPU, + InitResourcesReqMemory: reqMemory, + InitResourcesLimMemory: limMemory, + } +} diff --git a/docs/concepts-and-reference/helm-chart-configuration-reference.md b/docs/concepts-and-reference/helm-chart-configuration-reference.md index 22f0fd9af..031188096 100644 --- a/docs/concepts-and-reference/helm-chart-configuration-reference.md +++ b/docs/concepts-and-reference/helm-chart-configuration-reference.md @@ -22,11 +22,11 @@ These parameters configure the overall behavior of the CSI driver components. 
## S3 Global Configuration - + !!! important "Required Configuration" The S3 endpoint URL must be configured for the CSI driver to function. Use the global `s3.endpointUrl` and `s3.region` settings, which are used by both node and controller components for dynamic provisioning. - + | Parameter | Description | Default | Required | |------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| @@ -35,10 +35,10 @@ These parameters configure the overall behavior of the CSI driver components. ### Legacy Values (Backward Compatibility) - + !!! warning "Deprecated Configuration" The following legacy Helm values are supported for backward compatibility with earlier versions. **Use the new `s3.*` values for new installations.** Legacy values may be removed in a future release. - + | Legacy Value | New Value | Behavior | |--------------|-----------|----------| @@ -56,14 +56,14 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} ## S3 Credentials Secret Configuration - + !!! important "Security Note" The Helm chart **does not create secrets automatically**. A Kubernetes Secret containing S3 credentials must be created before installing the chart. The secret must contain the following keys: - `access_key_id`: S3 Access Key ID. - `secret_access_key`: S3 Secret Access Key. - `session_token` (optional): S3 Session Token, if using temporary credentials. 
- + | Parameter | Description | Default | Required | |------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| @@ -74,7 +74,7 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} ## Node Plugin Configuration - + !!! note "SELinux Context Note" The `node.seLinuxOptions.*` parameters define the SELinux security context for the CSI driver containers. These settings are applied to CSI Node DaemonSet containers and allow the containers to interact with systemd and manage mount points in SELinux-enforced environments. @@ -84,7 +84,7 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} - `type`: `super_t` - `role`: `system_r` - `level`: `s0` - + | Parameter | Description | Default | Required | |------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| @@ -125,11 +125,11 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} ## Controller Plugin Configuration (Dynamic Provisioning) - + !!! note "Dynamic Provisioning" The controller component is always deployed and provides dynamic provisioning capabilities. It automatically creates and deletes S3 buckets based on PersistentVolumeClaim requests that reference a StorageClass with the CSI driver. 
- + | Parameter | Description | Default | Required | |------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| @@ -138,10 +138,10 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} ## Mountpoint Pod Configuration (v2.0) - + !!! info "Pod Mounter Strategy" Version 2.0 uses pod-based mounter as the default strategy. Mounter pods are created in the `mount-s3` namespace to handle S3 mount operations with improved isolation and resource management. - + | Parameter | Description | Default | Required | |------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| @@ -153,6 +153,25 @@ value: {{ coalesce .Values.node.s3EndpointUrl .Values.s3.endpointUrl | quote }} | `mountpointPod.headroomImage.tag` | Image tag for headroom pods. | `3.10` | No | | `mountpointPod.headroomImage.pullPolicy` | Image pull policy for headroom pods. | `IfNotPresent` | No | +## TLS Configuration + + +!!! info "Custom CA Certificates" + When your S3 endpoint uses TLS with a private or internal CA, configure the `tls.*` parameters to inject the CA certificate. + **Recommended:** Use `tls.caCertData` with `--set-file` so Helm creates the ConfigMap in both namespaces automatically. + See the [TLS Configuration Guide](../driver-deployment/tls-configuration.md) for setup instructions. 
+ +| Parameter | Description | Default | Required | +|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| +| `tls.caCertConfigMap` | Name of the ConfigMap containing the CA certificate bundle (key: `ca-bundle.crt`). When `tls.caCertData` is also set, Helm creates the ConfigMap in both namespaces automatically. When `tls.caCertData` is empty, you must create the ConfigMap manually in both the controller namespace and `mountpointPod.namespace`. Leave empty to disable TLS. | `""` | No | +| `tls.caCertData` | PEM-encoded CA certificate content. When set together with `tls.caCertConfigMap`, Helm creates the ConfigMap in both namespaces automatically. Use `--set-file tls.caCertData=/path/to/ca.crt`. When empty, users must create the ConfigMap manually (see [Manual Mode](../driver-deployment/tls-configuration.md#manual-mode)). | `""` | No | +| `tls.initImage.repository` | Image repository for the CA certificate installation initContainer in mounter pods. | `alpine` | No | +| `tls.initImage.tag` | Image tag for the CA certificate installation initContainer. | `3.21` | No | +| `tls.initImage.pullPolicy` | Pull policy for the CA certificate init image. | `IfNotPresent` | No | +| `tls.initResources.requests.cpu` | CPU request for the CA certificate init container. | `10m` | No | +| `tls.initResources.requests.memory` | Memory request for the CA certificate init container. | `16Mi` | No | +| `tls.initResources.limits.memory` | Memory limit for the CA certificate init container. 
| `64Mi` | No | + ## CRD Cleanup Configuration (v2.0) | Parameter | Description | Default | Required | diff --git a/docs/driver-deployment/installation-guide.md b/docs/driver-deployment/installation-guide.md index 870976875..cbe9f5f42 100644 --- a/docs/driver-deployment/installation-guide.md +++ b/docs/driver-deployment/installation-guide.md @@ -38,7 +38,6 @@ The installation process consists of: - Set the secret access key. Replace `YOUR_SECRET_ACCESS_KEY` with the actual secret access key. - !!! note To avoid storing sensitive credentials in your shell history, history can be temporarily disabled before running commands with sensitive information: @@ -49,7 +48,6 @@ The installation process consists of: set -o history # turn it back on ``` - ```bash export SECRET_ACCESS_KEY="YOUR_SECRET_ACCESS_KEY" diff --git a/docs/driver-deployment/quick-start.md b/docs/driver-deployment/quick-start.md index 1929e3df1..d972432b4 100644 --- a/docs/driver-deployment/quick-start.md +++ b/docs/driver-deployment/quick-start.md @@ -6,7 +6,6 @@ This guide provides a fast way to deploy the Scality CSI Driver for S3 using Hel Before starting, ensure all requirements outlined in the **[Prerequisites](prerequisites.md)** guide are met. - !!! warning "For Testing Only" The quick start guide is intended for testing purposes only. The installation uses default values including: @@ -15,7 +14,6 @@ Before starting, ensure all requirements outlined in the **[Prerequisites](prere - Default S3 Region (can be overridden at volume level): `us-east-1` For production deployments and to customize these values or use a different namespace, see the [installation guide](installation-guide.md). 
- ## Installation diff --git a/docs/driver-deployment/tls-configuration.md b/docs/driver-deployment/tls-configuration.md new file mode 100644 index 000000000..fab0e2ff3 --- /dev/null +++ b/docs/driver-deployment/tls-configuration.md @@ -0,0 +1,264 @@ +# TLS Configuration + +## Overview + +When your S3 endpoint uses TLS with certificates signed by a private or internal CA, +the CSI driver needs access to the CA certificate to validate the connection. +The Scality CSI Driver supports injecting custom CA certificates via Kubernetes ConfigMaps. + +This is required when: + +- Your RING S3 endpoint uses HTTPS with a self-signed or internally-signed certificate +- Your organization uses a private CA for internal services +- The S3 endpoint's certificate chain is not in the default system trust store + +## Prerequisites + +- A PEM-encoded CA certificate file (the root or intermediate CA that signed your S3 server certificate) +- The CSI driver Helm chart installed or ready to install + +## Configuration (Helm-Managed) + +The recommended approach uses `--set-file` to pass the CA certificate content directly to Helm. +Helm creates the ConfigMap in both required namespaces automatically. + +### Step 1: Install or Upgrade with CA Certificate Data + +```bash +helm upgrade --install scality-s3-csi \ + ./charts/scality-mountpoint-s3-csi-driver \ + --namespace kube-system \ + --set s3.endpointUrl=https://s3.example.com:443 \ + --set tls.caCertConfigMap=s3-ca-cert \ + --set-file tls.caCertData=/path/to/your/ca.crt +``` + +This single command: + +- Creates a ConfigMap named `s3-ca-cert` in the controller namespace (`kube-system`) +- Creates the same ConfigMap in the mounter pod namespace (`mount-s3`) +- Configures the controller and mounter pods to use the CA certificate + +!!! important "Key Name" + The ConfigMap key is automatically set to `ca-bundle.crt`, which is the key the driver expects. 
+ +### Step 2: Verify + +Check that the controller pod has the CA certificate mounted: + +```bash +kubectl exec -n kube-system deploy/s3-csi-controller \ + -c s3-csi-controller -- ls /etc/ssl/custom-ca/ +``` + +Expected output: `ca-bundle.crt` + +Verify the ConfigMap exists in the mounter pod namespace: + +```bash +kubectl get configmap s3-ca-cert -n mount-s3 +``` + +### Certificate Rotation + +To rotate the CA certificate, update the Helm release with the new certificate file: + +```bash +helm upgrade scality-s3-csi \ + ./charts/scality-mountpoint-s3-csi-driver \ + --namespace kube-system \ + --reuse-values \ + --set-file tls.caCertData=/path/to/new/ca.crt +``` + +Helm updates the ConfigMap in both namespaces. Existing pods will pick up the change +on their next restart. + +## Manual Mode + +If you cannot pass the certificate data via Helm values (e.g., policy restrictions), +you can create the ConfigMaps manually. In this mode, set only `tls.caCertConfigMap` +without `tls.caCertData`. + +!!! info "Why Two Namespaces?" + The CA certificate ConfigMap must exist in **two** namespaces because the controller and + mounter pods run in separate namespaces: + + 1. **Controller namespace** (e.g., `kube-system`) — mounted by the `s3-csi-controller` for + AWS SDK S3 API calls (bucket creation/deletion during dynamic provisioning). + 2. **Mounter pod namespace** (e.g., `mount-s3`) — mounted by mounter pod init containers + that inject the CA into the `mount-s3` trust store. + +### Step 1: Create the CA Certificate ConfigMap in the Controller Namespace + +```bash +kubectl create configmap s3-ca-cert \ + --from-file=ca-bundle.crt=/path/to/your/ca.crt \ + -n kube-system +``` + +!!! important "Key Name" + The ConfigMap key **must** be `ca-bundle.crt`. This is the key the driver expects. 
+ +### Step 2: Install or Upgrade the Helm Chart + +```bash +helm upgrade --install scality-s3-csi \ + ./charts/scality-mountpoint-s3-csi-driver \ + --namespace kube-system \ + --set s3.endpointUrl=https://s3.example.com:443 \ + --set tls.caCertConfigMap=s3-ca-cert +``` + +### Step 3: Create the CA Certificate ConfigMap in the Mounter Namespace + +After Helm creates the `mount-s3` namespace, create the same ConfigMap there: + +```bash +kubectl create configmap s3-ca-cert \ + --from-file=ca-bundle.crt=/path/to/your/ca.crt \ + -n mount-s3 +``` + +!!! warning "Namespace Ordering" + Do **not** attempt to create the ConfigMap in the `mount-s3` namespace before the Helm install — + the namespace does not exist yet. If a ConfigMap is missing from either namespace, the + respective pod will be stuck in `ContainerCreating` with a `configmap not found` event. + +### Switching from Manual to Helm-Managed Mode + +If you previously created ConfigMaps manually and want to switch to Helm-managed mode, +delete the manually created ConfigMaps first — Helm cannot adopt resources it did not create: + +```bash +kubectl delete configmap s3-ca-cert -n kube-system +kubectl delete configmap s3-ca-cert -n mount-s3 +helm upgrade scality-s3-csi \ + ./charts/scality-mountpoint-s3-csi-driver \ + --namespace kube-system \ + --reuse-values \ + --set tls.caCertConfigMap=s3-ca-cert \ + --set-file tls.caCertData=/path/to/your/ca.crt +``` + +## How It Works + +The TLS configuration operates at two levels: + +### Controller Pod (Dynamic Provisioning) + +The controller pod uses the CA certificate for S3 API calls (bucket creation/deletion) +during dynamic provisioning: + +- The ConfigMap is mounted at `/etc/ssl/custom-ca/` in the `s3-csi-controller` container +- The `AWS_CA_BUNDLE` environment variable is set to `/etc/ssl/custom-ca/ca-bundle.crt` +- AWS SDK Go v2 reads this variable and uses the CA certificate for TLS validation + +### Mounter Pods (Volume Mounting) + +Mounter pods use `mount-s3` 
(which uses s2n-tls) to mount S3 buckets. +s2n-tls reads CA certificates from the system trust store (`/etc/ssl/certs/`), +so a simple volume mount is not sufficient. Instead: + +1. An **initContainer** (`install-ca-cert`) runs before the main `mountpoint` container +2. The initContainer copies the system CA bundle from the Alpine image to a shared emptyDir volume +3. It appends the custom CA certificate from the ConfigMap to the combined bundle +4. The main container mounts the shared volume at `/etc/ssl/certs/` (read-only) +5. `mount-s3` reads the combined trust store and validates the S3 endpoint certificate + +The initContainer runs as non-root and complies with the PodSecurity `restricted` policy +enforced on the mounter pod namespace. + +## Helm Values Reference + +| Parameter | Description | Default | +| --------- | ----------- | ------- | +| `tls.caCertConfigMap` | Name of the ConfigMap containing the CA certificate | `""` (disabled) | +| `tls.caCertData` | PEM-encoded CA certificate content (enables Helm-managed mode) | `""` | +| `tls.initImage.repository` | Image repository for the CA cert init container | `alpine` | +| `tls.initImage.tag` | Image tag for the CA cert init container | `3.21` | +| `tls.initImage.pullPolicy` | Pull policy for the init image | `IfNotPresent` | +| `tls.initResources.requests.cpu` | CPU request for the init container | `10m` | +| `tls.initResources.requests.memory` | Memory request for the init container | `16Mi` | +| `tls.initResources.limits.memory` | Memory limit for the init container | `64Mi` | + +## Why ConfigMap Instead of Secret + +CA certificates are public configuration data, not confidential information. 
+Using ConfigMaps instead of Secrets: + +- Follows the Kubernetes convention of using ConfigMaps for non-sensitive configuration +- Avoids unnecessary RBAC complexity for managing Secrets +- Makes the certificates easier to inspect and manage + +## Troubleshooting + +### Pod Stuck in ContainerCreating + +If a controller or mounter pod is stuck in `ContainerCreating` after enabling TLS, the CA +certificate ConfigMap is likely missing from that pod's namespace. Check the pod events: + +```bash +kubectl describe pod <pod-name> -n <namespace> +``` + +Look for an event like: `configmap "s3-ca-cert" not found`. + +To fix, either switch to Helm-managed mode (`--set-file tls.caCertData=...`) or create the +ConfigMap manually in the correct namespace: + +```bash +# For controller pods (controller namespace, default: kube-system) +kubectl create configmap s3-ca-cert \ + --from-file=ca-bundle.crt=/path/to/your/ca.crt \ + -n kube-system + +# For mounter pods (mounter pod namespace, default: mount-s3) +kubectl create configmap s3-ca-cert \ + --from-file=ca-bundle.crt=/path/to/your/ca.crt \ + -n mount-s3 +``` + +### Certificate Not Found + +If mounter pods fail with TLS errors, verify the ConfigMap exists in **both** namespaces and contains the expected key: + +1. Controller namespace (default: `kube-system`): + + ```bash + kubectl get configmap s3-ca-cert -n kube-system + ``` + +2. Mounter pod namespace (default: `mount-s3`): + + ```bash + kubectl get configmap s3-ca-cert -n mount-s3 + ``` + +3. 
The ConfigMap has the correct key: + + ```bash + kubectl get configmap s3-ca-cert -n mount-s3 -o jsonpath='{.data}' | head -c 100 + ``` + +### Certificate Chain Issues + +If you see certificate verification errors despite having the CA cert configured: + +- Ensure you are providing the **root CA** certificate, not the server certificate +- If using an intermediate CA, include the full chain in the `ca-bundle.crt` file +- Verify the certificate is in PEM format (starts with `-----BEGIN CERTIFICATE-----`) + +### Init Container Failures + +If the init container fails, check its logs: + +```bash +kubectl logs <mounter-pod-name> -n mount-s3 -c install-ca-cert +``` + +Common issues: + +- The init image must include a system CA bundle at `/etc/ssl/certs/ca-certificates.crt` + (Alpine includes this by default via the `ca-certificates` package) +- The ConfigMap may not be mounted correctly diff --git a/docs/driver-deployment/uninstallation.md index 7a34364c4..e18dd3026 100644 --- a/docs/driver-deployment/uninstallation.md +++ b/docs/driver-deployment/uninstallation.md @@ -4,13 +4,11 @@ This guide provides instructions for completely removing the Scality CSI Driver ## Before You Begin - !!! warning "Data Persistence" - Uninstalling the CSI driver does **not** delete data in S3 buckets - Existing PersistentVolumes with `Retain` policy will preserve bucket data - Kubernetes pod applications using S3 buckets as volumes will still be able to access their data after the driver is uninstalled deleted as the driver is responsible for mounting S3 when the pod starts. - If the driver is re-installed, pods which lost access to S3 will be able to access their data again. - !!! danger "Access to Data" If the driver is uninstalled while applications are still using S3 volumes, those applications will lose access to their to S3 if the kubernetes pods are deleted. This is due to orphaned FUSE processes. 
diff --git a/docs/glossary.md b/docs/glossary.md index d86857e18..ef397b97c 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -7,11 +7,13 @@ This glossary defines acronyms, technical terms, and concepts used throughout th | Acronym | Full Form | Definition | |---------|-----------|------------| | **API** | Application Programming Interface | A set of protocols and tools for building software applications | +| **CA** | Certificate Authority | Trusted entity that issues digital certificates for verifying identity | | **CLI** | Command Line Interface | A text-based interface for interacting with software | | **CRD** | Custom Resource Definition | Kubernetes extension mechanism for defining custom resources | | **CRT** | Common Runtime | AWS Common Runtime library used for S3 operations | | **CSI** | Container Storage Interface | A standard for exposing storage systems to containerized workloads | | **DNS** | Domain Name System | System that translates domain names to IP addresses | +| **FUSE** | Filesystem in Userspace | Mechanism allowing non-privileged users to create file systems without editing kernel code | | **GID** | Group Identifier | Numeric identifier for a group in Unix-like systems | | **GHCR** | GitHub Container Registry | GitHub's container image registry service | | **HTTP** | Hypertext Transfer Protocol | Protocol for transferring data over the web | @@ -19,17 +21,22 @@ This glossary defines acronyms, technical terms, and concepts used throughout th | **IAM** | Identity and Access Management | System for managing user identities and permissions | | **JSON** | JavaScript Object Notation | Lightweight data interchange format | | **KMS** | Key Management Service | Service for managing encryption keys | +| **PEM** | Privacy-Enhanced Mail | Text encoding format for cryptographic keys and certificates | | **POSIX** | Portable Operating System Interface | Set of standards for Unix-like operating systems | | **PV** | PersistentVolume | Kubernetes 
resource representing a piece of storage | | **PVC** | PersistentVolumeClaim | Kubernetes resource requesting storage from a PV | | **RBAC** | Role-Based Access Control | Method of restricting access based on user roles | | **S3** | Simple Storage Service | Object storage service protocol | | **S3PA** | S3 Pod Attachment (MountpointS3PodAttachment) | Kubernetes custom resource that tracks which workload pods are attached to a specific S3 volume, enabling volume sharing and mounter pod lifecycle management | +| **s2n-tls** | Signal to Noise TLS | AWS's open-source TLS implementation used by mount-s3 | | **SDK** | Software Development Kit | Collection of tools for developing applications | +| **SSL** | Secure Sockets Layer | Predecessor to TLS, often used colloquially to mean TLS | | **SSE** | Server-Side Encryption | Encryption of data at rest on the server | +| **TLS** | Transport Layer Security | Cryptographic protocol for secure communication over networks | | **TTL** | Time To Live | Duration for which data is considered valid | | **UID** | User Identifier | Numeric identifier for a user in Unix-like systems | | **URL** | Uniform Resource Locator | Web address identifying a resource | +| **X.509** | X.509 | ITU-T standard for public key certificates, used in TLS | | **YAML** | YAML Ain't Markup Language | Human-readable data serialization standard | ## Technical Terms diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index ef7548655..101613b5d 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -36,6 +36,7 @@ aws s3 ls s3://your-bucket --endpoint-url https://your-s3-endpoint.com | Pod cannot write/delete files | Missing write permissions | Add `allow-delete` and/or `allow-overwrite` to PV `mountOptions` | | `MountVolume.SetUp failed: context deadline exceeded` with mounter pod log showing `accept unix /comm/mount.sock: i/o timeout` | Mounter pod missing FSGroup in security context | Upgrade to the latest release. 
As a workaround, remove `fsGroup` from workload pod's security context | | Pod stuck in `ContainerCreating` with "driver name s3.csi.scality.com not found in the list of registered CSI drivers" | CSI driver not yet registered (startup race condition) | Apply `s3.csi.scality.com/agent-not-ready:NoExecute` taint to nodes. See [Node Startup Taint](driver-deployment/node-startup-taint.md) | +| Pod stuck in `ContainerCreating` with `configmap "..." not found` event | CA certificate ConfigMap missing from the pod's namespace | Create the ConfigMap in the correct namespace. See [TLS Troubleshooting](driver-deployment/tls-configuration.md#pod-stuck-in-containercreating) | ### Mount Issues @@ -46,6 +47,7 @@ aws s3 ls s3://your-bucket --endpoint-url https://your-s3-endpoint.com | "Access Denied" | Invalid S3 credentials | 1. Check secret contains `access_key_id` and `secret_access_key`
2. Test credentials with AWS CLI
3. Check bucket policy | | "InvalidBucketName" | Bucket name issue | 1. Check bucket exists
2. Check bucket name format
3. Ensure no typos | | "AWS_ENDPOINT_URL environment variable must be set" | Missing endpoint configuration | Set `s3EndpointUrl` in Helm values or driver configuration | +| TLS handshake failure or certificate verify failed | CA certificate ConfigMap missing or incorrect | Check the CA ConfigMap exists in both the controller namespace (default: `kube-system`) and the mounter pod namespace (`mountpointPod.namespace`, default: `mount-s3`) with key `ca-bundle.crt`. See [TLS Configuration](driver-deployment/tls-configuration.md#certificate-not-found) | ### Volume Issues diff --git a/docs/volume-provisioning/index.md b/docs/volume-provisioning/index.md index 79541e8a6..3fa851cdd 100644 --- a/docs/volume-provisioning/index.md +++ b/docs/volume-provisioning/index.md @@ -16,7 +16,6 @@ The Scality CSI Driver for S3 supports two methods for creating and managing per ## Getting Started - !!! tip "Quick Navigation" **Static Provisioning:** @@ -25,13 +24,13 @@ The Scality CSI Driver for S3 supports two methods for creating and managing per **Dynamic Provisioning:** - - [Overview & Examples](dynamic-provisioning/overview.md) - StorageClass setup and workflows + - [Overview & Examples](dynamic-provisioning/overview.md) - StorageClass setup and workflows - [Credentials Management](../architecture/ring-s3-credentials-management/dynamic-provisioning-credentials-management.md) - Template-based and fixed authentication methods **Common Configuration:** - [Mount Options Reference](mount-options.md) - Customization options for both provisioning methods - + - [TLS Configuration](../driver-deployment/tls-configuration.md) - Custom CA certificate support for HTTPS S3 endpoints ### Quick Start: Static Provisioning diff --git a/magefiles/e2e.go b/magefiles/e2e.go index 644233a3f..2f1cf330f 100644 --- a/magefiles/e2e.go +++ b/magefiles/e2e.go @@ -399,6 +399,98 @@ spec: return nil } +// ============================================================================= +// TLS CA ConfigMap Helpers 
+// ============================================================================= + +const caCertConfigMapName = "s3-ca-cert" + +// ensureCACertConfigMap creates the CA certificate ConfigMap in the given namespaces. +func ensureCACertConfigMap(namespaces []string) error { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get working directory: %w", err) + } + caCertPath := filepath.Join(wd, certsDir, "ca.crt") + + if _, err := os.Stat(caCertPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("CA certificate not found at %s (run GenerateTLSCerts first)", caCertPath) + } + return fmt.Errorf("failed to access CA certificate at %s: %w", caCertPath, err) + } + + for _, ns := range namespaces { + fmt.Printf("Creating CA cert ConfigMap %s in namespace %s...\n", caCertConfigMapName, ns) + if err := ensureNamespace(ns); err != nil { + return fmt.Errorf("failed to create namespace %s: %w", ns, err) + } + cmYAML, err := sh.Output("kubectl", "create", "configmap", caCertConfigMapName, + fmt.Sprintf("--from-file=ca-bundle.crt=%s", caCertPath), + "-n", ns, "--dry-run=client", "-o", "yaml") + if err != nil { + return fmt.Errorf("failed to generate ConfigMap YAML for namespace %s: %w", ns, err) + } + if err := pipeToKubectlApply(cmYAML); err != nil { + return fmt.Errorf("failed to apply CA cert ConfigMap in namespace %s: %w", ns, err) + } + } + + fmt.Printf("CA cert ConfigMap %s created in namespaces: %v\n", caCertConfigMapName, namespaces) + return nil +} + +// EnsureCACertConfigMap creates the CA certificate ConfigMap in the E2E and mount-s3 namespaces. +func (E2E) EnsureCACertConfigMap() error { + return ensureCACertConfigMap([]string{GetE2ENamespace(), "mount-s3"}) +} + +// TLSAll runs the full TLS E2E workflow: load credentials, deploy S3 with TLS, +// install CSI driver with Helm-managed TLS ConfigMaps, and run E2E tests. 
+func (E2E) TLSAll() error { + fmt.Println("Starting TLS E2E workflow...") + + // Load credentials from integration_config.json + if err := LoadCredentials(); err != nil { + return fmt.Errorf("failed to load credentials: %w", err) + } + + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get working directory: %w", err) + } + caCertPath := filepath.Join(wd, certsDir, "ca.crt") + + if _, err := os.Stat(caCertPath); err != nil { + return fmt.Errorf("CA certificate not accessible at %s (run GenerateTLSCerts first): %w", caCertPath, err) + } + + // Install CSI driver with Helm-managed TLS ConfigMaps. + // --set-file passes the PEM content directly, and the Helm chart creates + // the ConfigMap in both controller and mounter pod namespaces automatically. + if err := installCSIDriver(false, + "--set", fmt.Sprintf("tls.caCertConfigMap=%s", caCertConfigMapName), + "--set-file", fmt.Sprintf("tls.caCertData=%s", caCertPath), + ); err != nil { + return fmt.Errorf("CSI driver installation failed: %w", err) + } + + // Set AWS_CA_BUNDLE so the Ginkgo test binary's S3 client trusts the custom CA + // when creating buckets for pre-provisioned PV tests. + if err := os.Setenv("AWS_CA_BUNDLE", caCertPath); err != nil { + return fmt.Errorf("failed to set AWS_CA_BUNDLE: %w", err) + } + + // Run tests against HTTPS endpoint + tlsEndpoint := fmt.Sprintf("https://%s:%d", tlsHostname, tlsPort) + if err := runGinkgoTests(tlsEndpoint, "", 0, ""); err != nil { + return fmt.Errorf("TLS E2E tests failed: %w", err) + } + + fmt.Println("TLS E2E workflow completed successfully") + return nil +} + // PullImages pulls container images and downloads Go dependencies in parallel. // Reads CSI_IMAGE_REPOSITORY, CSI_IMAGE_TAG, and CLOUDSERVER_TAG env vars. // Skips individual pulls if the corresponding env var is empty. @@ -912,7 +1004,8 @@ func installCSIForE2E() error { // installCSIDriver is the consolidated installer for both E2E and OpenShift workflows. 
// When openshift is true, grants image pull access to mount-s3 namespace after install. // Both paths use "upgrade --install" for idempotency. -func installCSIDriver(openshift bool) error { +// extraHelmArgs allows passing additional --set or other Helm flags. +func installCSIDriver(openshift bool, extraHelmArgs ...string) error { namespace := GetE2ENamespace() s3EndpointURL := os.Getenv("S3_ENDPOINT_URL") if s3EndpointURL == "" { @@ -972,6 +1065,9 @@ func installCSIDriver(openshift bool) error { helmArgs = append(helmArgs, "--set", fmt.Sprintf("image.repository=%s", imageRepo)) } + // Append extra Helm args (e.g., TLS settings) + helmArgs = append(helmArgs, extraHelmArgs...) + fmt.Println("Running Helm install...") if err := sh.RunV("helm", helmArgs...); err != nil { return fmt.Errorf("helm install failed: %v", err) diff --git a/mkdocs.yml b/mkdocs.yml index bf58963ae..b6120ec4f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -107,6 +107,7 @@ nav: - Installation Guide: driver-deployment/installation-guide.md - Upgrade Guide: driver-deployment/upgrade-guide.md - Node Startup Taint: driver-deployment/node-startup-taint.md + - TLS Configuration: driver-deployment/tls-configuration.md - Uninstallation: driver-deployment/uninstallation.md - Volume Provisioning: - Overview: volume-provisioning/index.md diff --git a/pkg/podmounter/mppod/creator.go b/pkg/podmounter/mppod/creator.go index fdd960723..4a6242fc0 100644 --- a/pkg/podmounter/mppod/creator.go +++ b/pkg/podmounter/mppod/creator.go @@ -24,6 +24,15 @@ const ( const EmptyDirSizeLimit = 10 * 1024 * 1024 // 10MiB +const TLSEmptyDirSizeLimit = 2 * 1024 * 1024 // 2MiB — room for system CA bundle (~200KB) + custom CAs + +// Volume and container name constants for TLS configuration. +const ( + TLSCACertVolumeName = "custom-ca-cert" + TLSEtcSSLCertsVolumeName = "etc-ssl-certs" + TLSInitContainerName = "install-ca-cert" +) + // A ContainerConfig represents configuration for containers in the spawned Mountpoint Pods. 
type ContainerConfig struct { Command string @@ -32,6 +41,16 @@ type ContainerConfig struct { ImagePullPolicy corev1.PullPolicy } +// TLSConfig holds TLS configuration for custom CA certificates in mounter pods. +type TLSConfig struct { + CACertConfigMapName string + InitImage string + InitImagePullPolicy corev1.PullPolicy + InitResourcesReqCPU resource.Quantity + InitResourcesReqMemory resource.Quantity + InitResourcesLimMemory resource.Quantity +} + // A Config represents configuration for spawned Mountpoint Pods. type Config struct { Namespace string @@ -42,6 +61,7 @@ type Config struct { Container ContainerConfig CSIDriverVersion string ClusterVariant cluster.Variant + TLS *TLSConfig } // A Creator allows creating specification for Mountpoint Pods to schedule. @@ -62,6 +82,31 @@ func (c *Creator) Create(pod *corev1.Pod, pv *corev1.PersistentVolume) *corev1.P node := pod.Spec.NodeName name := MountpointPodNameFor(string(pod.UID), pv.Name) + volumes := []corev1.Volume{ + // This emptyDir volume is used for communication between Mountpoint Pod and the CSI Driver Node Pod + { + Name: CommunicationDirName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: resource.NewQuantity(EmptyDirSizeLimit, resource.BinarySI), + }, + }, + }, + } + + volumeMounts := []corev1.VolumeMount{ + { + Name: CommunicationDirName, + MountPath: filepath.Join("/", CommunicationDirName), + }, + } + + var initContainers []corev1.Container + if c.config.TLS != nil && c.config.TLS.CACertConfigMapName != "" { + volumes, volumeMounts, initContainers = c.configureTLS(volumes, volumeMounts) + } + mpPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -82,6 +127,7 @@ func (c *Creator) Create(pod *corev1.Pod, pv *corev1.PersistentVolume) *corev1.P SecurityContext: &corev1.PodSecurityContext{ FSGroup: c.config.ClusterVariant.MountpointPodUserID(), }, + InitContainers: initContainers, Containers: 
[]corev1.Container{{ Name: "mountpoint", Image: c.config.Container.Image, @@ -98,12 +144,7 @@ func (c *Creator) Create(pod *corev1.Pod, pv *corev1.PersistentVolume) *corev1.P Type: corev1.SeccompProfileTypeRuntimeDefault, }, }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: CommunicationDirName, - MountPath: filepath.Join("/", CommunicationDirName), - }, - }, + VolumeMounts: volumeMounts, }}, PriorityClassName: c.config.PriorityClassName, Affinity: &corev1.Affinity{ @@ -132,18 +173,7 @@ func (c *Creator) Create(pod *corev1.Pod, pv *corev1.PersistentVolume) *corev1.P // would also get descheduled naturally due to CSI volume lifecycle. {Operator: corev1.TolerationOpExists}, }, - Volumes: []corev1.Volume{ - // This emptyDir volume is used for communication between Mountpoint Pod and the CSI Driver Node Pod - { - Name: CommunicationDirName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - Medium: corev1.StorageMediumMemory, - SizeLimit: resource.NewQuantity(EmptyDirSizeLimit, resource.BinarySI), - }, - }, - }, - }, + Volumes: volumes, }, } @@ -156,6 +186,96 @@ func (c *Creator) Create(pod *corev1.Pod, pv *corev1.PersistentVolume) *corev1.P return mpPod } +// configureTLS adds TLS-related volumes, volume mounts, and init containers for custom CA certificate support. +// The init container installs the CA certificate into the system trust store so mount-s3's s2n-tls can use it. +func (c *Creator) configureTLS(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount) ([]corev1.Volume, []corev1.VolumeMount, []corev1.Container) { + // ConfigMap volume with ca-bundle.crt key. + // CA certificates are public, non-sensitive data — ConfigMap is the standard K8s choice over Secret. + // Items selects only the ca-bundle.crt key to avoid mounting unrelated keys if the ConfigMap is shared. 
+ volumes = append(volumes, corev1.Volume{ + Name: TLSCACertVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: c.config.TLS.CACertConfigMapName, + }, + Items: []corev1.KeyToPath{{Key: "ca-bundle.crt", Path: "ca-bundle.crt"}}, + }, + }, + }) + + // emptyDir for shared cert store between init container and main container + volumes = append(volumes, corev1.Volume{ + Name: TLSEtcSSLCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + SizeLimit: resource.NewQuantity(TLSEmptyDirSizeLimit, resource.BinarySI), + }, + }, + }) + + // Mount the shared cert store in the main container (read-only) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: TLSEtcSSLCertsVolumeName, + MountPath: "/etc/ssl/certs", + ReadOnly: true, + }) + + // Init container that builds a combined CA bundle for mount-s3's s2n-tls. + // + // Why an init container instead of mounting the ConfigMap directly at /etc/ssl/certs? + // Mounting the ConfigMap there would shadow the system CA bundle, leaving mount-s3 + // unable to verify well-known CAs (e.g., AWS endpoints, OCSP responders). + // Instead, we copy the system bundle and append the custom CA into a shared emptyDir, + // so s2n-tls finds both default and custom CAs in /etc/ssl/certs/ca-certificates.crt. + // + // Runs as non-root to comply with PodSecurity "restricted" policy. + initContainers := []corev1.Container{ + { + Name: TLSInitContainerName, + Image: c.config.TLS.InitImage, + ImagePullPolicy: c.config.TLS.InitImagePullPolicy, + Command: []string{ + "sh", "-c", + "set -e; if [ ! -f /etc/ssl/certs/ca-certificates.crt ]; then echo 'ERROR: /etc/ssl/certs/ca-certificates.crt not found. The TLS init image must include the ca-certificates package.' 
>&2; exit 1; fi; cp /etc/ssl/certs/ca-certificates.crt /shared-certs/ca-certificates.crt; echo >> /shared-certs/ca-certificates.crt; cat /custom-ca/ca-bundle.crt >> /shared-certs/ca-certificates.crt", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: TLSCACertVolumeName, + MountPath: "/custom-ca", + ReadOnly: true, + }, + { + Name: TLSEtcSSLCertsVolumeName, + MountPath: "/shared-certs", + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: c.config.TLS.InitResourcesReqCPU, + corev1.ResourceMemory: c.config.TLS.InitResourcesReqMemory, + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: c.config.TLS.InitResourcesLimMemory, + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + RunAsNonRoot: ptr.To(true), + RunAsUser: c.config.ClusterVariant.MountpointPodUserID(), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + }, + } + + return volumes, volumeMounts, initContainers +} + // extractVolumeAttributes extracts volume attributes from given `pv`. // It always returns a non-nil map, and it's safe to use even though `pv` doesn't contain any volume attributes. 
func extractVolumeAttributes(pv *corev1.PersistentVolume) map[string]string { diff --git a/pkg/podmounter/mppod/creator_test.go b/pkg/podmounter/mppod/creator_test.go index 4f8cdc2d0..b4ccfbea4 100644 --- a/pkg/podmounter/mppod/creator_test.go +++ b/pkg/podmounter/mppod/creator_test.go @@ -162,6 +162,123 @@ func TestCreatingMountpointPodsInOpenShift(t *testing.T) { createAndVerifyPod(t, cluster.OpenShift, (*int64)(nil)) } +func TestCreatingMountpointPodsWithTLS(t *testing.T) { + tlsConfig := &mppod.TLSConfig{ + CACertConfigMapName: "my-ca-cert", + InitImage: "alpine:3.21", + InitImagePullPolicy: corev1.PullIfNotPresent, + InitResourcesReqCPU: resource.MustParse("10m"), + InitResourcesReqMemory: resource.MustParse("16Mi"), + InitResourcesLimMemory: resource.MustParse("64Mi"), + } + + config := createTestConfig(cluster.DefaultKubernetes) + config.TLS = tlsConfig + creator := mppod.NewCreator(config) + + mpPod := creator.Create(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(testPodUID), + }, + Spec: corev1.PodSpec{ + NodeName: testNode, + }, + }, &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testVolName, + }, + }) + + // Verify 3 volumes: communication + ConfigMap + emptyDir + assert.Equals(t, 3, len(mpPod.Spec.Volumes)) + assert.Equals(t, mppod.CommunicationDirName, mpPod.Spec.Volumes[0].Name) + assert.Equals(t, mppod.TLSCACertVolumeName, mpPod.Spec.Volumes[1].Name) + assert.Equals(t, mppod.TLSEtcSSLCertsVolumeName, mpPod.Spec.Volumes[2].Name) + + // Verify ConfigMap volume (NOT Secret) + if mpPod.Spec.Volumes[1].ConfigMap == nil { + t.Fatal("expected ConfigMap volume source, got nil") + } + assert.Equals(t, "my-ca-cert", mpPod.Spec.Volumes[1].ConfigMap.Name) + assert.Equals(t, []corev1.KeyToPath{{Key: "ca-bundle.crt", Path: "ca-bundle.crt"}}, mpPod.Spec.Volumes[1].ConfigMap.Items) + + // Verify emptyDir volume for shared cert store + if mpPod.Spec.Volumes[2].EmptyDir == nil { + t.Fatal("expected EmptyDir volume source, got nil") 
+ } + assert.Equals(t, resource.NewQuantity(mppod.TLSEmptyDirSizeLimit, resource.BinarySI), mpPod.Spec.Volumes[2].EmptyDir.SizeLimit) + + // Verify 1 initContainer with correct configuration + assert.Equals(t, 1, len(mpPod.Spec.InitContainers)) + initContainer := mpPod.Spec.InitContainers[0] + assert.Equals(t, mppod.TLSInitContainerName, initContainer.Name) + assert.Equals(t, "alpine:3.21", initContainer.Image) + assert.Equals(t, corev1.PullIfNotPresent, initContainer.ImagePullPolicy) + + // Verify init container command includes ca-certificates existence check + assert.Equals(t, []string{ + "sh", "-c", + "set -e; if [ ! -f /etc/ssl/certs/ca-certificates.crt ]; then echo 'ERROR: /etc/ssl/certs/ca-certificates.crt not found. The TLS init image must include the ca-certificates package.' >&2; exit 1; fi; cp /etc/ssl/certs/ca-certificates.crt /shared-certs/ca-certificates.crt; echo >> /shared-certs/ca-certificates.crt; cat /custom-ca/ca-bundle.crt >> /shared-certs/ca-certificates.crt", + }, initContainer.Command) + + // Verify init container mounts + assert.Equals(t, 2, len(initContainer.VolumeMounts)) + assert.Equals(t, mppod.TLSCACertVolumeName, initContainer.VolumeMounts[0].Name) + assert.Equals(t, "/custom-ca", initContainer.VolumeMounts[0].MountPath) + assert.Equals(t, true, initContainer.VolumeMounts[0].ReadOnly) + assert.Equals(t, mppod.TLSEtcSSLCertsVolumeName, initContainer.VolumeMounts[1].Name) + assert.Equals(t, "/shared-certs", initContainer.VolumeMounts[1].MountPath) + + // Verify init container resources + assert.Equals(t, resource.MustParse("10m"), initContainer.Resources.Requests[corev1.ResourceCPU]) + assert.Equals(t, resource.MustParse("16Mi"), initContainer.Resources.Requests[corev1.ResourceMemory]) + assert.Equals(t, resource.MustParse("64Mi"), initContainer.Resources.Limits[corev1.ResourceMemory]) + + // Verify init container has restricted security context (compliant with PodSecurity "restricted") + assert.Equals(t, ptr.To(false), 
initContainer.SecurityContext.AllowPrivilegeEscalation) + assert.Equals(t, &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}}, initContainer.SecurityContext.Capabilities) + assert.Equals(t, ptr.To(true), initContainer.SecurityContext.RunAsNonRoot) + assert.Equals(t, ptr.To(int64(1000)), initContainer.SecurityContext.RunAsUser) + assert.Equals(t, &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, initContainer.SecurityContext.SeccompProfile) + + // Verify main container has /etc/ssl/certs mount (read-only) + assert.Equals(t, 2, len(mpPod.Spec.Containers[0].VolumeMounts)) + assert.Equals(t, mppod.CommunicationDirName, mpPod.Spec.Containers[0].VolumeMounts[0].Name) + assert.Equals(t, mppod.TLSEtcSSLCertsVolumeName, mpPod.Spec.Containers[0].VolumeMounts[1].Name) + assert.Equals(t, "/etc/ssl/certs", mpPod.Spec.Containers[0].VolumeMounts[1].MountPath) + assert.Equals(t, true, mpPod.Spec.Containers[0].VolumeMounts[1].ReadOnly) +} + +func TestCreatingMountpointPodsWithoutTLS(t *testing.T) { + config := createTestConfig(cluster.DefaultKubernetes) + // TLS is nil by default + creator := mppod.NewCreator(config) + + mpPod := creator.Create(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(testPodUID), + }, + Spec: corev1.PodSpec{ + NodeName: testNode, + }, + }, &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testVolName, + }, + }) + + // Verify 0 init containers + assert.Equals(t, 0, len(mpPod.Spec.InitContainers)) + + // Verify 1 volume (communication only) + assert.Equals(t, 1, len(mpPod.Spec.Volumes)) + assert.Equals(t, mppod.CommunicationDirName, mpPod.Spec.Volumes[0].Name) + + // Verify 1 volume mount (communication only) + assert.Equals(t, 1, len(mpPod.Spec.Containers[0].VolumeMounts)) + assert.Equals(t, mppod.CommunicationDirName, mpPod.Spec.Containers[0].VolumeMounts[0].Name) +} + func TestNewCreator(t *testing.T) { config := mppod.Config{ Namespace: "test-namespace",