diff --git a/.github/actions/e2e-setup-common/action.yaml b/.github/actions/e2e-setup-common/action.yaml index 19ef2fa95..432e17d6b 100644 --- a/.github/actions/e2e-setup-common/action.yaml +++ b/.github/actions/e2e-setup-common/action.yaml @@ -4,6 +4,10 @@ inputs: ref: description: "Git ref to checkout" required: true + deploy_csi_driver: + description: "Whether to deploy the CSI driver via Helm" + required: false + default: "true" runs: using: "composite" steps: @@ -39,3 +43,42 @@ runs: run: | docker pull ghcr.io/${{ github.repository }}:${{ github.sha }} kind load docker-image ghcr.io/${{ github.repository }}:${{ github.sha }} --name helm-test-cluster + + - name: Deploy Scality Storage + shell: bash + run: |- + set -e -o pipefail; + mkdir -p logs/s3 logs/iam logs/cosi_driver data/vaultdb + chown -R runner:docker logs + chmod -R ugo+rwx data + docker compose --profile s3 up -d --quiet-pull + bash ../scripts/wait_for_local_port.bash 8000 30 + working-directory: .github/scality-storage-deployment + if: ${{ inputs.deploy_csi_driver == 'true' }} + + - name: Set S3 Endpoint URL environment variable + shell: bash + run: | + echo "S3_ENDPOINT_URL=http://$(hostname -I | awk '{print $1}'):8000" >> $GITHUB_ENV + echo "S3 Endpoint URL: http://$(hostname -I | awk '{print $1}'):8000" + if: ${{ inputs.deploy_csi_driver == 'true' }} + + - name: Setup Helm + uses: azure/setup-helm@v4.3.0 + if: ${{ inputs.deploy_csi_driver == 'true' }} + + - name: Install CSI Driver with Helm + if: ${{ inputs.deploy_csi_driver == 'true' }} + shell: bash + run: | + helm upgrade --install mountpoint-s3-csi-driver --namespace kube-system ./charts/scality-mountpoint-s3-csi-driver --values \ + ./charts/scality-mountpoint-s3-csi-driver/values.yaml \ + --set image.repository=ghcr.io/${{ github.repository }} \ + --set image.tag=${{ github.sha }} \ + --set node.serviceAccount.create=true \ + --set node.podInfoOnMountCompat.enable=true \ + --set experimental.podMounter=systemd \ + --set 
config.s3EndpointUrl=${{ env.S3_ENDPOINT_URL }} + kubectl rollout status daemonset s3-csi-node -n kube-system --timeout=60s + kubectl get pods -A + echo "s3-csi-node-image: $(kubectl get daemonset s3-csi-node -n kube-system -o jsonpath="{$.spec.template.spec.containers[:1].image}")" diff --git a/.github/scality-storage-deployment/cloudserver-config.json b/.github/scality-storage-deployment/cloudserver-config.json new file mode 100644 index 000000000..d43a7b9da --- /dev/null +++ b/.github/scality-storage-deployment/cloudserver-config.json @@ -0,0 +1,49 @@ +{ + "port": 8000, + "listenOn": [], + "restEndpoints": { + "localhost": "us-east-1", + "127.0.0.1": "us-east-1", + "cloudserver-front": "us-east-1", + "s3.docker.test": "us-east-1", + "127.0.0.2": "us-east-1", + "s3.amazonaws.com": "us-east-1" + }, + "websiteEndpoints": [ + "s3-website-us-east-1.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.localhost", + "s3-website.scality.test" + ], + "vaultd": { + "host": "localhost", + "port": 8500 + }, + "clusters": 1, + "log": { + "logLevel": "trace", + "dumpLevel": "error" + }, + "healthChecks": { + "allowFrom": ["127.0.0.1/8", "::1"] + }, + "recordLog": { + "enabled": false, + "recordLogName": "s3-recordlog" + }, + "requests": { + "viaProxy": false, + "trustedProxyCIDRs": [], + "extractClientIPFromHeader": "" + } +} diff --git a/.github/scality-storage-deployment/docker-compose.yml b/.github/scality-storage-deployment/docker-compose.yml new file mode 100644 index 000000000..309f9c466 --- /dev/null +++ 
b/.github/scality-storage-deployment/docker-compose.yml @@ -0,0 +1,11 @@ +services: + s3: + profiles: ['s3'] + image: ${CLOUDSERVER_IMAGE} + network_mode: host + environment: + S3_CONFIG_FILE: /conf/config.json + command: /bin/sh -c "yarn run mem_backend > /logs/s3/s3.log 2>&1" + volumes: + - ./cloudserver-config.json:/conf/config.json:ro + - ./logs/s3:/logs/s3 diff --git a/.github/scripts/wait_for_local_port.bash b/.github/scripts/wait_for_local_port.bash new file mode 100644 index 000000000..0ab03d2f6 --- /dev/null +++ b/.github/scripts/wait_for_local_port.bash @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +wait_for_local_port() { + local port=$1 + local timeout=$2 + local count=0 + local ret=1 + echo "waiting for storage-service:$port" + while [[ "$ret" -eq "1" && "$count" -lt "$timeout" ]] ; do + nc -z -w 1 localhost $port + ret=$? + if [ ! "$ret" -eq "0" ]; then + echo -n . + sleep 1 + count=$(($count+1)) + fi + done + + echo "" + + if [[ "$count" -eq "$timeout" ]]; then + echo "Server did not start in less than $timeout seconds. Exiting..." + exit 1 + fi + + echo "Server got ready in ~${count} seconds. Starting test now..." 
+} + +wait_for_local_port $1 $2 diff --git a/.github/workflows/ci-and-e2e-tests.yaml b/.github/workflows/ci-and-e2e-tests.yaml index 369a5eb70..1cbf22109 100644 --- a/.github/workflows/ci-and-e2e-tests.yaml +++ b/.github/workflows/ci-and-e2e-tests.yaml @@ -5,6 +5,12 @@ on: branches: - '**' +env: + CLOUDSERVER_IMAGE: ${{ vars.CLOUDSERVER_IMAGE }} + AWS_ACCESS_KEY_ID: "accessKey1" + AWS_SECRET_ACCESS_KEY: "verySecretKey1" + KUBECONFIG: "/home/runner/.kube/config" + jobs: dev-image: name: Dev Image @@ -20,7 +26,7 @@ jobs: controller-e2e-tests: name: E2E Controller Tests - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: dev-image steps: - name: Check out repository @@ -30,6 +36,39 @@ jobs: uses: ./.github/actions/e2e-setup-common with: ref: ${{ github.sha }} + deploy_csi_driver: "false" - name: Run Controller Tests run: make e2e-controller + + # systemd mounter is used when the mounter is launched within the CSI driver pod. + systemd-mounter-e2e-tests: + name: E2E Systemd Mounter Tests + runs-on: ubuntu-24.04 + needs: dev-image + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Run Common Setup + uses: ./.github/actions/e2e-setup-common + with: + ref: ${{ github.sha }} + + - name: Run E2E Tests (SystemdMounter) + env: + S3_ENDPOINT_URL: ${{ env.S3_ENDPOINT_URL }} + run: | + set +e + pushd tests/e2e-kubernetes + KUBECONFIG=/home/runner/.kube/config ginkgo -p -vv -timeout 60m -- \ + --bucket-region=us-east-1 \ + --commit-id=${{ github.sha }} \ + --bucket-prefix=helm-test-cluster + EXIT_CODE=$? 
+ kubectl logs -l app=s3-csi-node -n kube-system --kubeconfig ${KUBECONFIG} + kubectl version --kubeconfig ${KUBECONFIG} + kubectl get nodes -o wide --kubeconfig ${KUBECONFIG} + popd + cat tests/e2e-kubernetes/csi-test-artifacts/output.json + exit $EXIT_CODE diff --git a/E2E_TESTS_DOCUMENTATION.md b/E2E_TESTS_DOCUMENTATION.md new file mode 100644 index 000000000..1c02b74f2 --- /dev/null +++ b/E2E_TESTS_DOCUMENTATION.md @@ -0,0 +1,514 @@ +# S3 CSI Driver E2E Kubernetes Tests Documentation + +This document provides comprehensive documentation for the end-to-end (E2E) tests for the S3 CSI Driver on Kubernetes. These tests verify that the driver works correctly with S3 buckets in a real Kubernetes environment. + +## Table of Contents +- [Introduction](#introduction) +- [Test Environment Setup](#test-environment-setup) +- [Test Execution Guide](#test-execution-guide) +- [Test Suite Overview](#test-suite-overview) +- [Test Scripts Documentation](#test-scripts-documentation) +- [S3 Client Implementation](#s3-client-implementation) +- [Test Implementation Details](#test-implementation-details) +- [Performance Testing with FIO](#performance-testing-with-fio) +- [Troubleshooting](#troubleshooting) +- [Extending the Tests](#extending-the-tests) +- [CI Integration](#ci-integration) + +## Introduction + +The E2E test framework verifies that the S3 CSI Driver can correctly: +- Mount S3 buckets as volumes in Kubernetes pods +- Support both standard S3 and S3 Express storage classes +- Handle authentication and credential management +- Perform basic file operations on mounted volumes +- Configure various mount options +- Support caching mechanisms + +The tests are built using the Kubernetes E2E test framework and Ginkgo testing library, providing integration with standard Kubernetes testing patterns while adding S3-specific test cases. + +### Architecture Overview + +The test framework consists of the following components: + +1. 
**Test Driver** (`testdriver.go`) - Implements the Kubernetes storage test driver interface +2. **Test Suites** (`testsuites/`) - Contains the test implementations +3. **S3 Client** (`s3client/`) - Handles S3 bucket operations +4. **Scripts** (`scripts/`) - Manages the test environment +5. **Performance Testing** (`fio/`) - Contains FIO configurations for performance testing + +## Test Environment Setup + +### Prerequisites + +To run these tests, you need: +- AWS CLI configured with appropriate credentials +- Go development environment (1.19+) +- kubectl +- Access to create AWS resources (S3 buckets, EC2 instances, etc.) + +Required AWS permissions are listed in the [README.md](./README.md#prerequisites). + +### Environment Variables + +Important environment variables for test configuration: +```bash +export KOPS_STATE_FILE="s3://your-kops-state-store" # set KOPS_STATE_FILE to your bucket when running locally +export AWS_REGION=us-east-1 +export TAG= # CSI Driver image tag to install +export IMAGE_NAME="s3-csi-driver" # repository is inferred from current AWS account and region +export SSH_KEY=/path/to/your/ssh/key.pub # optional +export K8S_VERSION="1.30.0" # optional, must be a full version +export CLUSTER_TYPE=kops # or eksctl +export ARCH=x86 # or arm64 +export MOUNTER_KIND=systemd # type of mounter to use +``` + +### Resource Requirements + +The tests require: +- Kubernetes cluster with at least 3 nodes +- Sufficient IAM permissions for S3 operations +- Sufficient quota for EC2 instances +- Network connectivity to AWS S3 endpoints + +### Cluster Creation Options + +The test framework supports two types of Kubernetes clusters: + +1. **kops** - Kubernetes Operations, a tool to create production-ready Kubernetes clusters on AWS + - Provides more flexibility for cluster configuration + - Requires a state store in S3 + - Configuration file: `kops-patch.yaml` and related files + +2. 
**eksctl** - Amazon EKS CLI, a tool for creating and managing clusters on Amazon EKS + - Easier to use with Amazon EKS + - Better integration with AWS services + - Configuration file: `eksctl-patch.json` and related files + +The cluster type is specified using the `CLUSTER_TYPE` environment variable. + +## Test Execution Guide + +All commands should be executed from the repository root as described in the [README.md](./README.md). + +### Full Test Sequence + +```bash +# 1. Install required tools +ACTION=install_tools tests/e2e-kubernetes/scripts/run.sh + +# 2. Create a Kubernetes cluster +ACTION=create_cluster tests/e2e-kubernetes/scripts/run.sh + +# 3. Update kubeconfig +ACTION=update_kubeconfig tests/e2e-kubernetes/scripts/run.sh + +# 4. Install the S3 CSI driver +ACTION=install_driver tests/e2e-kubernetes/scripts/run.sh + +# 5. Run tests +ACTION=run_tests tests/e2e-kubernetes/scripts/run.sh + +# 6. Clean up +ACTION=uninstall_driver tests/e2e-kubernetes/scripts/run.sh +ACTION=delete_cluster tests/e2e-kubernetes/scripts/run.sh +``` + +### Local vs CI Test Execution + +When running tests locally: +- You need to provide your own S3 bucket for KOPS_STATE_FILE +- You should use your own AWS credentials +- You can choose to run specific tests or suites + +In CI: +- State bucket and credentials are provided by the CI environment +- All tests are run automatically +- Results are reported in the CI logs + +### Test Command-Line Parameters + +The test command supports several parameters: +``` +--bucket-region: AWS region for creating test buckets +--commit-id: Commit ID used for naming test buckets +--bucket-prefix: Prefix for test bucket names +--performance: Run performance tests (boolean) +--imds-available: Whether instance metadata service is available (boolean) +``` + +## Test Suite Overview + +### Active Test Suites + +The test suites in `e2e_test.go` define which tests are run: + +```go +var CSITestSuites = []func() framework.TestSuite{ + 
testsuites.InitVolumesTestSuite, + custom_testsuites.InitS3CSIMultiVolumeTestSuite, + custom_testsuites.InitS3MountOptionsTestSuite, + custom_testsuites.InitS3CSICredentialsTestSuite, + custom_testsuites.InitS3CSICacheTestSuite, +} +``` + +1. **Volume Tests** (`testsuites.InitVolumesTestSuite`) + - Tests basic volume operations + - Writes and reads data from mounted volumes + - Verifies content integrity + - **Key test**: Writing 53 bytes to index.html file, then reading and verifying content from another pod + +2. **Multi-Volume Tests** (`custom_testsuites.InitS3CSIMultiVolumeTestSuite`) + - Tests mounting multiple S3 buckets simultaneously + - Verifies isolation between volumes + - Checks that data written to one volume doesn't appear in another + - Tests defined in `testsuites/multivolume.go` + +3. **Mount Options Tests** (`custom_testsuites.InitS3MountOptionsTestSuite`) + - Tests different mount options for S3 volumes + - Verifies that mount options are correctly applied + - Tests specific mount option behaviors + - Tests defined in `testsuites/mountoptions.go` + +4. **Credentials Tests** (`custom_testsuites.InitS3CSICredentialsTestSuite`) + - Tests various credential configurations + - Verifies authentication methods work correctly + - Tests access using different credential types + - Tests defined in `testsuites/credentials.go` + +5. **Cache Tests** (`custom_testsuites.InitS3CSICacheTestSuite`) + - Tests caching functionality + - Verifies cache behavior and performance + - Tests data persistence across pod restarts + - Tests defined in `testsuites/cache.go` + +6. 
**Performance Tests** (`custom_testsuites.InitS3CSIPerformanceTestSuite`) - only run with `--performance=true` flag + - Benchmarks performance metrics + - Uses FIO for storage performance testing + - Collects and reports performance data + - Tests defined in `testsuites/performance.go` + +### Skipped Test Suites + +Several standard Kubernetes storage test suites are skipped (commented out in `e2e_test.go`) because they test functionality that doesn't apply to S3 storage: + +- `InitCapacityTestSuite` - S3 doesn't have traditional capacity limits +- `InitVolumeIOTestSuite` - Tries to open a file for writing multiple times, which is unsupported by Mountpoint +- `InitVolumeModeTestSuite` - Block mode not supported by S3, only succeeds in checking unused volume is not mounted +- `InitSubPathTestSuite` - Subpath mounting not applicable +- `InitProvisioningTestSuite` - Dynamic provisioning not supported (static only) +- `InitMultiVolumeTestSuite` - Replaced by S3-specific multi-volume test +- `InitVolumeExpandTestSuite` - Volume expansion not applicable to S3 +- `InitDisruptiveTestSuite` - Disruptive tests not applicable +- `InitVolumeLimitsTestSuite` - Volume limits not applicable +- `InitTopologyTestSuite` - Topology not applicable +- `InitVolumeStressTestSuite` - Generic stress tests not applicable +- `InitFsGroupChangePolicyTestSuite` - FsGroup policies not applicable +- `InitSnapshottableTestSuite` - Snapshots not applicable +- `InitSnapshottableStressTestSuite` - Snapshot stress tests not applicable +- `InitVolumePerformanceTestSuite` - Replaced by S3-specific performance tests +- `InitReadWriteOncePodTestSuite` - ReadWriteOnce not applicable (S3 supports ReadWriteMany) + +## Test Scripts Documentation + +The `scripts` directory contains scripts that manage the test environment: + +### run.sh + +The main entry point for test execution. 
Supports various actions: + +- `install_tools` - Installs required tools (kubectl, helm, kops, eksctl) +- `create_cluster` - Creates a Kubernetes cluster +- `update_kubeconfig` - Updates the kubeconfig file +- `install_driver` - Installs the S3 CSI driver +- `run_tests` - Runs the E2E tests +- `run_perf` - Runs performance tests +- `uninstall_driver` - Uninstalls the S3 CSI driver +- `delete_cluster` - Deletes the Kubernetes cluster +- `e2e_cleanup` - Cleans up resources created during tests + +Implementation details: +- Sets up environment variables and directories +- Sources other scripts (kops.sh, eksctl.sh, helm.sh) +- Executes the requested action +- Handles errors and exit codes + +### kops.sh + +Functions for creating and managing kops clusters: +- `kops_install` - Installs kops +- `kops_create_cluster` - Creates a Kubernetes cluster using kops +- `kops_delete_cluster` - Deletes a kops cluster + +Configuration is done via: +- Environment variables +- Patch files for customization +- Command-line arguments + +### eksctl.sh + +Functions for creating and managing EKS clusters: +- `eksctl_install` - Installs eksctl +- `eksctl_create_cluster` - Creates an EKS cluster +- `eksctl_delete_cluster` - Deletes an EKS cluster + +Configuration is done via: +- Environment variables +- JSON patch files +- Command-line arguments + +### helm.sh + +Handles driver installation via Helm: +- `helm_install` - Installs Helm +- `helm_install_driver` - Installs the S3 CSI driver using Helm +- `helm_uninstall_driver` - Uninstalls the S3 CSI driver +- `driver_installed` - Checks if the driver is installed + +## S3 Client Implementation + +The `s3client` directory contains the S3 client implementation used by the tests: + +### Client Structure + +The S3 client is implemented in `s3client/client.go` and provides: +- Bucket creation and deletion +- Support for both standard S3 and S3 Express directory buckets +- Authentication configuration +- Error handling + +### Authentication Methods 
+ +The client supports multiple authentication methods: +- IAM roles - Used when running in AWS with appropriate IAM roles +- Access keys - Used when explicit credentials are provided +- Instance metadata - Used when running on EC2 instances + +Environment variables for authentication: +- `S3_ENDPOINT_URL` - Custom S3 endpoint +- `AWS_ACCESS_KEY_ID` - Access key ID +- `AWS_SECRET_ACCESS_KEY` - Secret access key + +### Bucket Management + +The client provides functions for: +- Creating standard S3 buckets +- Creating S3 Express directory buckets +- Deleting buckets and their contents +- Waiting for bucket availability + +Bucket naming convention: +- Based on cluster name, commit ID, and random suffix +- Ensures uniqueness for parallel test runs + +### Error Handling + +The client implements error handling for: +- Connection issues +- Permission errors +- Bucket already exists +- Bucket not empty +- Service unavailable + +## Test Implementation Details + +### Test Driver + +The `testdriver.go` file implements the Kubernetes storage test driver interface: + +Key components: +- `s3Driver` struct - Implements the test driver interface +- `s3Volume` struct - Represents an S3 volume +- Test driver initialization and configuration +- Volume creation and mounting + +Implementation details: +- Implements `framework.TestDriver` interface +- Implements `framework.PreprovisionedVolumeTestDriver` interface +- Implements `framework.PreprovisionedPVTestDriver` interface +- Skips unsupported test patterns + +### Volume Creation Workflow + +1. Test creates a bucket via S3 client (`CreateVolume` method) +2. Driver configures the bucket as a persistent volume (`GetPersistentVolumeSource` method) +3. Kubernetes framework creates PVs and PVCs +4. Test pods are created that mount these volumes +5. Tests perform operations on the mounted volumes +6. 
Resources are cleaned up after tests (`DeleteVolume` method) + +### Test Utilities + +The `testsuites/util.go` file provides helper functions: + +- `genBinDataFromSeed` - Generates random data for tests +- `checkWriteToPath` - Verifies writing to a path +- `checkReadFromPath` - Verifies reading from a path +- `createVolumeResourceWithMountOptions` - Creates volume with specific mount options +- `createPod` - Creates test pods +- `createPodWithServiceAccount` - Creates pods with specific service accounts +- Other utility functions for test implementation + +## Performance Testing with FIO + +The `fio` directory contains FIO (Flexible I/O Tester) configurations for benchmarking: + +### FIO Configuration + +Configuration files define: +- Test duration +- I/O patterns (random, sequential) +- Block sizes +- Number of jobs +- Read/write mix + +### Running Performance Tests + +Performance tests are run with: +```bash +ACTION=run_perf tests/e2e-kubernetes/scripts/run.sh +``` + +Or directly via: +```bash +KUBECONFIG=${KUBECONFIG} go test -ginkgo.vv --bucket-region=${REGION} --commit-id=${TAG} --bucket-prefix=${CLUSTER_NAME} --performance=true --imds-available=true +``` + +### Metrics Collected + +The performance tests collect and report: +- Read/write throughput (MB/s) +- IOPS (I/O operations per second) +- Latency statistics (min, avg, max, percentiles) +- CPU utilization during tests + +Results are stored in JSON format for analysis. + +## Troubleshooting + +### Common Issues + +1. **Cluster creation failure** + - Check AWS permissions + - Verify region quotas + - Check network configuration + - Examine kops/eksctl logs + +2. **Driver installation failure** + - Verify image tag and availability + - Check Helm configuration and logs + - Examine pod logs + - Check for conflicting resources + +3. **Test failures** + - Check S3 bucket accessibility + - Verify authentication configuration + - Check network connectivity to S3 + - Examine test pod logs + +4. 
**S3 Access Issues** + - Verify IAM permissions + - Check for bucket policy restrictions + - Verify credentials are correctly passed + - Check for endpoint configuration issues + +### Log Collection + +Important logs to collect: + +```bash +# Check driver logs +kubectl logs -l app=s3-csi-node -n kube-system + +# Check test pod logs +kubectl logs -n + +# Check kops logs (if using kops) +kops get cluster +kops validate cluster + +# Check eksctl logs (if using eksctl) +eksctl get cluster +``` + +## Extending the Tests + +### Adding New Test Cases + +1. Identify the appropriate test suite for your test +2. Create a new test function using Ginkgo's `It` block +3. Implement the test logic using helper functions from `testsuites/util.go` +4. Add appropriate assertions and cleanup + +Example: +```go +It("should write and read files with custom permissions", func() { + // Test implementation +}) +``` + +### Adding Tests for New S3 Implementations + +1. Extend the S3 client to support the new implementation: + - Add new authentication methods if needed + - Add new bucket creation functions if needed + - Handle implementation-specific errors + +2. Add test cases that verify implementation-specific functionality: + - Create a new test suite if necessary + - Add tests for unique features + - Test compatibility with existing functionality + +3. 
Update configuration to support new implementation parameters: + - Add environment variables for configuration + - Update driver to use new parameters + +### Best Practices + +- Clean up resources after tests +- Use unique identifiers for test resources +- Implement proper error handling +- Add appropriate logging for debugging +- Follow existing test patterns +- Make tests independent of each other +- Avoid assumptions about the environment + +## CI Integration + +The E2E tests are run as part of the CI pipeline: + +### CI Configuration + +The tests are executed in CI using the workflow defined in `.github/workflows/ci-and-e2e-tests.yaml`: +1. Build the S3 CSI driver image +2. Create a test cluster +3. Install the driver with the built image +4. Run the E2E tests +5. Clean up resources + +### Environment Setup in CI + +CI environment uses: +- Predefined AWS credentials +- Automated cluster creation +- Parallel test execution +- Automatic cleanup + +### Test Reports + +Test results are reported in CI output: +- Test successes and failures +- Performance metrics (if applicable) +- Resource usage +- Test execution time + +### Common CI Issues + +- Timeouts during cluster creation +- Permission issues with AWS resources +- Race conditions in parallel tests +- Resource cleanup failures + +For detailed information on specific topics, see the code comments and documentation in the respective directories. 
\ No newline at end of file diff --git a/codecov.yml b/codecov.yml index 6e787d3e3..59ecf13fb 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,7 +1,7 @@ codecov: - notify: - wait_for_ci: true - after_n_builds: 1 + # notify: + # wait_for_ci: true + # after_n_builds: 2 comment: layout: newheader, reach, files, components, diff, flags # show component info in the PR comment diff --git a/file_operations_test_plan.md b/file_operations_test_plan.md new file mode 100644 index 000000000..b4266c72d --- /dev/null +++ b/file_operations_test_plan.md @@ -0,0 +1,106 @@ +# File Operations Test Plan for S3 CSI Driver + +This document outlines the test plan for validating basic file operations with the S3 CSI Driver. + +## Test Objectives + +To verify that the S3 CSI Driver correctly supports all essential file operations when mounting S3 buckets as volumes in Kubernetes pods. + +## Test Categories + +### 1. Basic File Operations + +- **File Creation** + - Create files of various sizes (empty, small, medium, large) + - Create files with special characters in names + - Create files with very long names + +- **File Reading** + - Read entire files of different sizes + - Perform partial reads (specific byte ranges) + - Verify content integrity + +- **File Updates** + - Overwrite existing files + - Append data to existing files + - Modify specific portions of files + +- **File Deletion** + - Delete individual files + - Delete multiple files in sequence + - Attempt to delete non-existent files + +### 2. Directory Operations + +- **Directory Creation** + - Create empty directories + - Create nested directory structures + - Create directories with special characters + +- **Directory Listing** + - List empty directories + - List directories with few files + - List directories with many files + - List directory hierarchies + +- **Directory Deletion** + - Delete empty directories + - Delete directories with content + - Delete nested directory structures + +### 3. 
Metadata and Permissions + +- **File Metadata** + - Check file sizes + - Check file timestamps + - Test extended attributes (if supported) + +- **File Permissions** + - Test read/write permissions + - Test execution permissions (if applicable) + - Test ownership settings + +### 4. Concurrent Operations + +- **Multiple Readers** + - Test multiple pods reading the same file + - Verify data consistency across readers + +- **Multiple Writers** + - Test multiple pods writing to different files in same volume + - Test contention handling for same-file writes (if supported) + +### 5. Edge Cases + +- **Path Handling** + - Test absolute vs relative paths + - Test path traversal (../file) + - Test maximum path length + +- **Special Files** + - Test zero-byte files + - Test very large files (multi-GB if supported) + - Test file names with various character sets + +## Test Implementation Plan + +1. Create a new test suite in the `testsuites/` directory +2. Implement test cases for each category +3. Ensure proper cleanup after each test +4. Add metrics collection for performance-sensitive operations +5. Integrate with existing test framework + +## Success Criteria + +- All basic file operations work correctly +- File content integrity is maintained +- Directory operations function as expected +- Proper error handling for invalid operations +- Performance meets acceptable thresholds + +## Test Environment Requirements + +- Kubernetes cluster with S3 CSI driver installed +- Access to S3 endpoint +- Sufficient permissions for all operations +- Multiple worker nodes for concurrent testing \ No newline at end of file diff --git a/full_context_for_tests.md b/full_context_for_tests.md new file mode 100644 index 000000000..5b5d61a05 --- /dev/null +++ b/full_context_for_tests.md @@ -0,0 +1,354 @@ +# File Operations Test Plan for S3 CSI Driver + +This document outlines the test plan for validating basic file operations with the S3 CSI Driver. 
+ +## Test Objectives + +To verify that the S3 CSI Driver correctly supports all essential file operations when mounting S3 buckets as volumes in Kubernetes pods. + +## Test Categories + +### 1. Basic File Operations + +- **File Creation** + - Create files of various sizes (empty, small, medium, large) + - Create files with special characters in names + - Create files with very long names + +- **File Reading** + - Read entire files of different sizes + - Perform partial reads (specific byte ranges) + - Verify content integrity + +- **File Updates** + - Overwrite existing files + - Append data to existing files + - Modify specific portions of files + +- **File Deletion** + - Delete individual files + - Delete multiple files in sequence + - Attempt to delete non-existent files + +### 2. Directory Operations + +- **Directory Creation** + - Create empty directories + - Create nested directory structures + - Create directories with special characters + +- **Directory Listing** + - List empty directories + - List directories with few files + - List directories with many files + - List directory hierarchies + +- **Directory Deletion** + - Delete empty directories + - Delete directories with content + - Delete nested directory structures + +### 3. Metadata and Permissions + +- **File Metadata** + - Check file sizes + - Check file timestamps + - Test extended attributes (if supported) + +- **File Permissions** + - Test read/write permissions + - Test execution permissions (if applicable) + - Test ownership settings + +### 4. Concurrent Operations + +- **Multiple Readers** + - Test multiple pods reading the same file + - Verify data consistency across readers + +- **Multiple Writers** + - Test multiple pods writing to different files in same volume + - Test contention handling for same-file writes (if supported) + +### 5. 
Edge Cases + +- **Path Handling** + - Test absolute vs relative paths + - Test path traversal (../file) + - Test maximum path length + +- **Special Files** + - Test zero-byte files + - Test very large files (multi-GB if supported) + - Test file names with various character sets + +## Implementation Details + +### Test Suite Structure + +The file operations test suite will follow the structure of existing test suites in the codebase: + +```go +type s3CSIFileOperationsTestSuite struct { + tsInfo storageframework.TestSuiteInfo +} + +func InitS3CSIFileOperationsTestSuite() storageframework.TestSuite { + return &s3CSIFileOperationsTestSuite{ + tsInfo: storageframework.TestSuiteInfo{ + Name: "fileoperations", + TestPatterns: []storageframework.TestPattern{ + storageframework.DefaultFsPreprovisionedPV, + }, + }, + } +} + +func (t *s3CSIFileOperationsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { + return t.tsInfo +} + +func (t *s3CSIFileOperationsTestSuite) SkipUnsupportedTests(_ storageframework.TestDriver, _ storageframework.TestPattern) { +} + +func (t *s3CSIFileOperationsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + // Test implementations will go here +} +``` + +### Key Test Implementations + +#### File Creation and Reading + +Similar to the existing `checkWriteToPath` and `checkReadFromPath` functions, we'll implement specific tests for file operations: + +```go +// Generate data with a specific size and seed +data := genBinDataFromSeed(dataSize, seed) +encoded := base64.StdEncoding.EncodeToString(data) + +// Write to a file +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d > %s", encoded, path)) + +// Verify content integrity with SHA256 +sum := sha256.Sum256(data) +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("sha256sum %s | grep -Fq %x", path, sum)) +``` + +#### Directory Operations + +For directory tests, we'll use standard shell commands to create and 
manipulate directories: + +```go +// Create a nested directory structure +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir -p %s/level1/level2/level3", basePath)) + +// Create files in various directories +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'file1' > %s/level1/file1.txt", basePath)) +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'file2' > %s/level1/level2/file2.txt", basePath)) + +// Verify directory structure +checkListingPathWithEntries(f, pod, fmt.Sprintf("%s/level1", basePath), []string{"file1.txt", "level2"}) +``` + +#### File Metadata and Permissions + +Tests for file metadata and permissions will use standard Linux commands like `stat`: + +```go +// Check file permissions +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%a %%g %%u' %s | grep '644 %d %d'", + filePath, defaultNonRootGroup, defaultNonRootUser)) + +// Check file size +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%s' %s | grep '%d'", + filePath, expectedSize)) +``` + +### Utility Functions + +We'll use a combination of existing utility functions and new ones: + +#### Checking File Operations + +```go +// Check if a file exists +func checkFileExists(f *framework.Framework, pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -f %s", path)) +} + +// Check if a directory exists +func checkDirExists(f *framework.Framework, pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path)) +} + +// Create a file with specific content +func createFileWithContent(f *framework.Framework, pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' > %s", content, path)) +} + +// Append to an existing file +func appendToFile(f *framework.Framework, pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' >> %s", content, path)) +} +``` + +### Concurrent Access 
Testing + +For testing concurrent access, we'll implement a test similar to the existing multivolume test: + +```go +testConcurrentAccess := func(ctx context.Context, pvc *v1.PersistentVolumeClaim, numPods int) { + var pods []*v1.Pod + node := l.config.ClientNodeSelection + + // Create pods + for i := 0; i < numPods; i++ { + pod, err := e2epod.CreatePod(ctx, f.ClientSet, f.Namespace.Name, nil, + []*v1.PersistentVolumeClaim{pvc}, + admissionapi.LevelBaseline, "") + framework.ExpectNoError(err) + pods = append(pods, pod) + } + + // Each pod creates a unique file + for i, pod := range pods { + filePath := fmt.Sprintf("/mnt/volume1/file-%d.txt", i) + content := fmt.Sprintf("Content from pod %d", i) + createFileWithContent(f, pod, filePath, content) + } + + // Each pod verifies all files + for _, pod := range pods { + for i := 0; i < numPods; i++ { + filePath := fmt.Sprintf("/mnt/volume1/file-%d.txt", i) + content := fmt.Sprintf("Content from pod %d", i) + verifyFileContent(f, pod, filePath, content) + } + } +} +``` + +### Performance Considerations + +For performance testing, we'll leverage the existing FIO framework: + +```go +// Example FIO config for large file read test +func largeFileReadTest(f *framework.Framework, pod *v1.Pod, filePath string) { + fioCfg := ` +[global] +name=large_file_read +bs=1M +runtime=30s +time_based +group_reporting +filename=%s + +[sequential_read] +size=1G +rw=read +ioengine=sync +fallocate=none +` + configPath := "/tmp/large_read.fio" + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' > %s", + fmt.Sprintf(fioCfg, filePath), configPath)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("fio %s", configPath)) +} +``` + +## S3-Specific Considerations + +When implementing the file operations test suite, several S3-specific considerations must be taken into account: + +### Object Storage vs. 
File System + +- **Directories are virtual**: S3 is an object store without native directory concepts, so directory operations need special handling +- **Atomic operations**: S3 operations are primarily atomic at the object level, not at the file/partial update level +- **Eventually consistent**: S3 offers eventual consistency which may affect test cases that check for immediate visibility of changes +- **Handling metadata**: S3 objects have their own metadata model which doesn't directly map to file system attributes + +### S3 Limitations and Performance Characteristics + +1. **List operations**: S3 list operations can be slow for directories with many objects +2. **Small file overhead**: There's significant overhead for small file operations on S3 +3. **Prefixes and delimiters**: S3 uses prefixes and delimiters for "directory-like" listing +4. **Sequential vs. random access**: Sequential access patterns perform better than random access +5. **Throughput considerations**: The test suite should measure throughput for different types of operations + +### Mountpoint-Specific Considerations + +Since the CSI driver uses Mountpoint for S3 as its underlying mounting technology, we should account for: + +1. **Caching behavior**: Mountpoint implements various caching mechanisms that may affect test results +2. **Read-after-write consistency**: Test for expected behavior in read-after-write scenarios +3. **Maximum file size**: Test varying file sizes to evaluate performance characteristics +4. **Operations that may be unsupported**: Some standard filesystem operations may be unavailable or behave differently + +## Integration with Existing Framework + +### Using Common Test Utilities + +The file operations test suite will utilize existing test utilities: + +1. **Volume framework**: Leverage Kubernetes E2E storage framework +2. **Pod creation helpers**: Use existing pod creation and management functions +3. 
**Volume resource management**: Use the framework's volume resource lifecycle management +4. **Test assertions**: Use existing assertion utilities for consistent error reporting + +### Extension Points + +1. **Mount options testing**: Extend existing mount options tests with file operation validation +2. **Multi-volume interactions**: Test file operations across multiple volumes +3. **Cache behavior validation**: Extend cache tests with specific file operation scenarios + +### Key Functions to Reuse + +```go +// From testsuites/util.go +custom_testsuites.CreateVolumeResourceWithMountOptions() // For creating volumes with specific options +custom_testsuites.CreatePod() // For creating pods with volumes +custom_testsuites.PodModifierNonRoot() // For testing non-root user scenarios +custom_testsuites.CheckWriteToPath() // For writing data to files +custom_testsuites.CheckReadFromPath() // For reading data from files +``` + +## Test Implementation Plan + +1. Create a new test suite file `testsuites/fileoperations.go` +2. Implement the core test suite structure +3. Add common utility functions for file operations +4. Implement tests for each category: + - Basic file operations + - Directory operations + - Metadata/permissions tests + - Concurrent access tests + - Edge cases +5. 
Add the test suite to `e2e_test.go`: + +```go +var CSITestSuites = []func() framework.TestSuite{ + testsuites.InitVolumesTestSuite, + custom_testsuites.InitS3CSIMultiVolumeTestSuite, + custom_testsuites.InitS3MountOptionsTestSuite, + custom_testsuites.InitS3CSICredentialsTestSuite, + custom_testsuites.InitS3CSICacheTestSuite, + custom_testsuites.InitS3CSIFileOperationsTestSuite, // Add new test suite +} +``` + +## Success Criteria + +- All basic file operations work correctly +- File content integrity is maintained +- Directory operations function as expected +- Proper error handling for invalid operations +- Performance meets acceptable thresholds + +## Test Environment Requirements + +- Kubernetes cluster with S3 CSI driver installed +- Access to S3 endpoint +- Sufficient permissions for all operations +- Multiple worker nodes for concurrent testing \ No newline at end of file diff --git a/tests/e2e-kubernetes/E2E_TESTS_DOCUMENTATION.md b/tests/e2e-kubernetes/E2E_TESTS_DOCUMENTATION.md new file mode 100644 index 000000000..1c02b74f2 --- /dev/null +++ b/tests/e2e-kubernetes/E2E_TESTS_DOCUMENTATION.md @@ -0,0 +1,514 @@ +# S3 CSI Driver E2E Kubernetes Tests Documentation + +This document provides comprehensive documentation for the end-to-end (E2E) tests for the S3 CSI Driver on Kubernetes. These tests verify that the driver works correctly with S3 buckets in a real Kubernetes environment. 
+ +## Table of Contents +- [Introduction](#introduction) +- [Test Environment Setup](#test-environment-setup) +- [Test Execution Guide](#test-execution-guide) +- [Test Suite Overview](#test-suite-overview) +- [Test Scripts Documentation](#test-scripts-documentation) +- [S3 Client Implementation](#s3-client-implementation) +- [Test Implementation Details](#test-implementation-details) +- [Performance Testing with FIO](#performance-testing-with-fio) +- [Troubleshooting](#troubleshooting) +- [Extending the Tests](#extending-the-tests) +- [CI Integration](#ci-integration) + +## Introduction + +The E2E test framework verifies that the S3 CSI Driver can correctly: +- Mount S3 buckets as volumes in Kubernetes pods +- Support both standard S3 and S3 Express storage classes +- Handle authentication and credential management +- Perform basic file operations on mounted volumes +- Configure various mount options +- Support caching mechanisms + +The tests are built using the Kubernetes E2E test framework and Ginkgo testing library, providing integration with standard Kubernetes testing patterns while adding S3-specific test cases. + +### Architecture Overview + +The test framework consists of the following components: + +1. **Test Driver** (`testdriver.go`) - Implements the Kubernetes storage test driver interface +2. **Test Suites** (`testsuites/`) - Contains the test implementations +3. **S3 Client** (`s3client/`) - Handles S3 bucket operations +4. **Scripts** (`scripts/`) - Manages the test environment +5. **Performance Testing** (`fio/`) - Contains FIO configurations for performance testing + +## Test Environment Setup + +### Prerequisites + +To run these tests, you need: +- AWS CLI configured with appropriate credentials +- Go development environment (1.19+) +- kubectl +- Access to create AWS resources (S3 buckets, EC2 instances, etc.) + +Required AWS permissions are listed in the [README.md](./README.md#prerequisites). 
+ +### Environment Variables + +Important environment variables for test configuration: +```bash +export KOPS_STATE_FILE="s3://your-kops-state-store" # set KOPS_STATE_FILE to your bucket when running locally +export AWS_REGION=us-east-1 +export TAG= # CSI Driver image tag to install +export IMAGE_NAME="s3-csi-driver" # repository is inferred from current AWS account and region +export SSH_KEY=/path/to/your/ssh/key.pub # optional +export K8S_VERSION="1.30.0" # optional, must be a full version +export CLUSTER_TYPE=kops # or eksctl +export ARCH=x86 # or arm64 +export MOUNTER_KIND=systemd # type of mounter to use +``` + +### Resource Requirements + +The tests require: +- Kubernetes cluster with at least 3 nodes +- Sufficient IAM permissions for S3 operations +- Sufficient quota for EC2 instances +- Network connectivity to AWS S3 endpoints + +### Cluster Creation Options + +The test framework supports two types of Kubernetes clusters: + +1. **kops** - Kubernetes Operations, a tool to create production-ready Kubernetes clusters on AWS + - Provides more flexibility for cluster configuration + - Requires a state store in S3 + - Configuration file: `kops-patch.yaml` and related files + +2. **eksctl** - Amazon EKS CLI, a tool for creating and managing clusters on Amazon EKS + - Easier to use with Amazon EKS + - Better integration with AWS services + - Configuration file: `eksctl-patch.json` and related files + +The cluster type is specified using the `CLUSTER_TYPE` environment variable. + +## Test Execution Guide + +All commands should be executed from the repository root as described in the [README.md](./README.md). + +### Full Test Sequence + +```bash +# 1. Install required tools +ACTION=install_tools tests/e2e-kubernetes/scripts/run.sh + +# 2. Create a Kubernetes cluster +ACTION=create_cluster tests/e2e-kubernetes/scripts/run.sh + +# 3. Update kubeconfig +ACTION=update_kubeconfig tests/e2e-kubernetes/scripts/run.sh + +# 4. 
Install the S3 CSI driver +ACTION=install_driver tests/e2e-kubernetes/scripts/run.sh + +# 5. Run tests +ACTION=run_tests tests/e2e-kubernetes/scripts/run.sh + +# 6. Clean up +ACTION=uninstall_driver tests/e2e-kubernetes/scripts/run.sh +ACTION=delete_cluster tests/e2e-kubernetes/scripts/run.sh +``` + +### Local vs CI Test Execution + +When running tests locally: +- You need to provide your own S3 bucket for KOPS_STATE_FILE +- You should use your own AWS credentials +- You can choose to run specific tests or suites + +In CI: +- State bucket and credentials are provided by the CI environment +- All tests are run automatically +- Results are reported in the CI logs + +### Test Command-Line Parameters + +The test command supports several parameters: +``` +--bucket-region: AWS region for creating test buckets +--commit-id: Commit ID used for naming test buckets +--bucket-prefix: Prefix for test bucket names +--performance: Run performance tests (boolean) +--imds-available: Whether instance metadata service is available (boolean) +``` + +## Test Suite Overview + +### Active Test Suites + +The test suites in `e2e_test.go` define which tests are run: + +```go +var CSITestSuites = []func() framework.TestSuite{ + testsuites.InitVolumesTestSuite, + custom_testsuites.InitS3CSIMultiVolumeTestSuite, + custom_testsuites.InitS3MountOptionsTestSuite, + custom_testsuites.InitS3CSICredentialsTestSuite, + custom_testsuites.InitS3CSICacheTestSuite, +} +``` + +1. **Volume Tests** (`testsuites.InitVolumesTestSuite`) + - Tests basic volume operations + - Writes and reads data from mounted volumes + - Verifies content integrity + - **Key test**: Writing 53 bytes to index.html file, then reading and verifying content from another pod + +2. 
**Multi-Volume Tests** (`custom_testsuites.InitS3CSIMultiVolumeTestSuite`) + - Tests mounting multiple S3 buckets simultaneously + - Verifies isolation between volumes + - Checks that data written to one volume doesn't appear in another + - Tests defined in `testsuites/multivolume.go` + +3. **Mount Options Tests** (`custom_testsuites.InitS3MountOptionsTestSuite`) + - Tests different mount options for S3 volumes + - Verifies that mount options are correctly applied + - Tests specific mount option behaviors + - Tests defined in `testsuites/mountoptions.go` + +4. **Credentials Tests** (`custom_testsuites.InitS3CSICredentialsTestSuite`) + - Tests various credential configurations + - Verifies authentication methods work correctly + - Tests access using different credential types + - Tests defined in `testsuites/credentials.go` + +5. **Cache Tests** (`custom_testsuites.InitS3CSICacheTestSuite`) + - Tests caching functionality + - Verifies cache behavior and performance + - Tests data persistence across pod restarts + - Tests defined in `testsuites/cache.go` + +6. 
**Performance Tests** (`custom_testsuites.InitS3CSIPerformanceTestSuite`) - only run with `--performance=true` flag + - Benchmarks performance metrics + - Uses FIO for storage performance testing + - Collects and reports performance data + - Tests defined in `testsuites/performance.go` + +### Skipped Test Suites + +Several standard Kubernetes storage test suites are skipped (commented out in `e2e_test.go`) because they test functionality that doesn't apply to S3 storage: + +- `InitCapacityTestSuite` - S3 doesn't have traditional capacity limits +- `InitVolumeIOTestSuite` - Tries to open a file for writing multiple times, which is unsupported by Mountpoint +- `InitVolumeModeTestSuite` - Block mode not supported by S3, only succeeds in checking unused volume is not mounted +- `InitSubPathTestSuite` - Subpath mounting not applicable +- `InitProvisioningTestSuite` - Dynamic provisioning not supported (static only) +- `InitMultiVolumeTestSuite` - Replaced by S3-specific multi-volume test +- `InitVolumeExpandTestSuite` - Volume expansion not applicable to S3 +- `InitDisruptiveTestSuite` - Disruptive tests not applicable +- `InitVolumeLimitsTestSuite` - Volume limits not applicable +- `InitTopologyTestSuite` - Topology not applicable +- `InitVolumeStressTestSuite` - Generic stress tests not applicable +- `InitFsGroupChangePolicyTestSuite` - FsGroup policies not applicable +- `InitSnapshottableTestSuite` - Snapshots not applicable +- `InitSnapshottableStressTestSuite` - Snapshot stress tests not applicable +- `InitVolumePerformanceTestSuite` - Replaced by S3-specific performance tests +- `InitReadWriteOncePodTestSuite` - ReadWriteOnce not applicable (S3 supports ReadWriteMany) + +## Test Scripts Documentation + +The `scripts` directory contains scripts that manage the test environment: + +### run.sh + +The main entry point for test execution. 
Supports various actions: + +- `install_tools` - Installs required tools (kubectl, helm, kops, eksctl) +- `create_cluster` - Creates a Kubernetes cluster +- `update_kubeconfig` - Updates the kubeconfig file +- `install_driver` - Installs the S3 CSI driver +- `run_tests` - Runs the E2E tests +- `run_perf` - Runs performance tests +- `uninstall_driver` - Uninstalls the S3 CSI driver +- `delete_cluster` - Deletes the Kubernetes cluster +- `e2e_cleanup` - Cleans up resources created during tests + +Implementation details: +- Sets up environment variables and directories +- Sources other scripts (kops.sh, eksctl.sh, helm.sh) +- Executes the requested action +- Handles errors and exit codes + +### kops.sh + +Functions for creating and managing kops clusters: +- `kops_install` - Installs kops +- `kops_create_cluster` - Creates a Kubernetes cluster using kops +- `kops_delete_cluster` - Deletes a kops cluster + +Configuration is done via: +- Environment variables +- Patch files for customization +- Command-line arguments + +### eksctl.sh + +Functions for creating and managing EKS clusters: +- `eksctl_install` - Installs eksctl +- `eksctl_create_cluster` - Creates an EKS cluster +- `eksctl_delete_cluster` - Deletes an EKS cluster + +Configuration is done via: +- Environment variables +- JSON patch files +- Command-line arguments + +### helm.sh + +Handles driver installation via Helm: +- `helm_install` - Installs Helm +- `helm_install_driver` - Installs the S3 CSI driver using Helm +- `helm_uninstall_driver` - Uninstalls the S3 CSI driver +- `driver_installed` - Checks if the driver is installed + +## S3 Client Implementation + +The `s3client` directory contains the S3 client implementation used by the tests: + +### Client Structure + +The S3 client is implemented in `s3client/client.go` and provides: +- Bucket creation and deletion +- Support for both standard S3 and S3 Express directory buckets +- Authentication configuration +- Error handling + +### Authentication Methods 
+ +The client supports multiple authentication methods: +- IAM roles - Used when running in AWS with appropriate IAM roles +- Access keys - Used when explicit credentials are provided +- Instance metadata - Used when running on EC2 instances + +Environment variables for authentication: +- `S3_ENDPOINT_URL` - Custom S3 endpoint +- `AWS_ACCESS_KEY_ID` - Access key ID +- `AWS_SECRET_ACCESS_KEY` - Secret access key + +### Bucket Management + +The client provides functions for: +- Creating standard S3 buckets +- Creating S3 Express directory buckets +- Deleting buckets and their contents +- Waiting for bucket availability + +Bucket naming convention: +- Based on cluster name, commit ID, and random suffix +- Ensures uniqueness for parallel test runs + +### Error Handling + +The client implements error handling for: +- Connection issues +- Permission errors +- Bucket already exists +- Bucket not empty +- Service unavailable + +## Test Implementation Details + +### Test Driver + +The `testdriver.go` file implements the Kubernetes storage test driver interface: + +Key components: +- `s3Driver` struct - Implements the test driver interface +- `s3Volume` struct - Represents an S3 volume +- Test driver initialization and configuration +- Volume creation and mounting + +Implementation details: +- Implements `framework.TestDriver` interface +- Implements `framework.PreprovisionedVolumeTestDriver` interface +- Implements `framework.PreprovisionedPVTestDriver` interface +- Skips unsupported test patterns + +### Volume Creation Workflow + +1. Test creates a bucket via S3 client (`CreateVolume` method) +2. Driver configures the bucket as a persistent volume (`GetPersistentVolumeSource` method) +3. Kubernetes framework creates PVs and PVCs +4. Test pods are created that mount these volumes +5. Tests perform operations on the mounted volumes +6. 
Resources are cleaned up after tests (`DeleteVolume` method) + +### Test Utilities + +The `testsuites/util.go` file provides helper functions: + +- `genBinDataFromSeed` - Generates random data for tests +- `checkWriteToPath` - Verifies writing to a path +- `checkReadFromPath` - Verifies reading from a path +- `createVolumeResourceWithMountOptions` - Creates volume with specific mount options +- `createPod` - Creates test pods +- `createPodWithServiceAccount` - Creates pods with specific service accounts +- Other utility functions for test implementation + +## Performance Testing with FIO + +The `fio` directory contains FIO (Flexible I/O Tester) configurations for benchmarking: + +### FIO Configuration + +Configuration files define: +- Test duration +- I/O patterns (random, sequential) +- Block sizes +- Number of jobs +- Read/write mix + +### Running Performance Tests + +Performance tests are run with: +```bash +ACTION=run_perf tests/e2e-kubernetes/scripts/run.sh +``` + +Or directly via: +```bash +KUBECONFIG=${KUBECONFIG} go test -ginkgo.vv --bucket-region=${REGION} --commit-id=${TAG} --bucket-prefix=${CLUSTER_NAME} --performance=true --imds-available=true +``` + +### Metrics Collected + +The performance tests collect and report: +- Read/write throughput (MB/s) +- IOPS (I/O operations per second) +- Latency statistics (min, avg, max, percentiles) +- CPU utilization during tests + +Results are stored in JSON format for analysis. + +## Troubleshooting + +### Common Issues + +1. **Cluster creation failure** + - Check AWS permissions + - Verify region quotas + - Check network configuration + - Examine kops/eksctl logs + +2. **Driver installation failure** + - Verify image tag and availability + - Check Helm configuration and logs + - Examine pod logs + - Check for conflicting resources + +3. **Test failures** + - Check S3 bucket accessibility + - Verify authentication configuration + - Check network connectivity to S3 + - Examine test pod logs + +4. 
**S3 Access Issues** + - Verify IAM permissions + - Check for bucket policy restrictions + - Verify credentials are correctly passed + - Check for endpoint configuration issues + +### Log Collection + +Important logs to collect: + +```bash +# Check driver logs +kubectl logs -l app=s3-csi-node -n kube-system + +# Check test pod logs +kubectl logs <pod-name> -n <namespace> + +# Check kops logs (if using kops) +kops get cluster +kops validate cluster + +# Check eksctl logs (if using eksctl) +eksctl get cluster +``` + +## Extending the Tests + +### Adding New Test Cases + +1. Identify the appropriate test suite for your test +2. Create a new test function using Ginkgo's `It` block +3. Implement the test logic using helper functions from `testsuites/util.go` +4. Add appropriate assertions and cleanup + +Example: +```go +It("should write and read files with custom permissions", func() { + // Test implementation +}) +``` + +### Adding Tests for New S3 Implementations + +1. Extend the S3 client to support the new implementation: + - Add new authentication methods if needed + - Add new bucket creation functions if needed + - Handle implementation-specific errors + +2. Add test cases that verify implementation-specific functionality: + - Create a new test suite if necessary + - Add tests for unique features + - Test compatibility with existing functionality + +3. 
Update configuration to support new implementation parameters: + - Add environment variables for configuration + - Update driver to use new parameters + +### Best Practices + +- Clean up resources after tests +- Use unique identifiers for test resources +- Implement proper error handling +- Add appropriate logging for debugging +- Follow existing test patterns +- Make tests independent of each other +- Avoid assumptions about the environment + +## CI Integration + +The E2E tests are run as part of the CI pipeline: + +### CI Configuration + +The tests are executed in CI using the workflow defined in `.github/workflows/ci-and-e2e-tests.yaml`: +1. Build the S3 CSI driver image +2. Create a test cluster +3. Install the driver with the built image +4. Run the E2E tests +5. Clean up resources + +### Environment Setup in CI + +CI environment uses: +- Predefined AWS credentials +- Automated cluster creation +- Parallel test execution +- Automatic cleanup + +### Test Reports + +Test results are reported in CI output: +- Test successes and failures +- Performance metrics (if applicable) +- Resource usage +- Test execution time + +### Common CI Issues + +- Timeouts during cluster creation +- Permission issues with AWS resources +- Race conditions in parallel tests +- Resource cleanup failures + +For detailed information on specific topics, see the code comments and documentation in the respective directories. 
\ No newline at end of file diff --git a/tests/e2e-kubernetes/e2e_test.go b/tests/e2e-kubernetes/e2e_test.go index f966ded50..8e3ae45f3 100644 --- a/tests/e2e-kubernetes/e2e_test.go +++ b/tests/e2e-kubernetes/e2e_test.go @@ -2,6 +2,7 @@ package e2e import ( "flag" + "os" "testing" "github.com/awslabs/aws-s3-csi-driver/tests/e2e-kubernetes/s3client" @@ -28,6 +29,17 @@ func init() { flag.BoolVar(&IMDSAvailable, "imds-available", false, "indicates whether instance metadata service is available") flag.Parse() + // Set environment variables for S3 configuration + if os.Getenv("S3_ENDPOINT_URL") != "" { + s3client.DefaultEndpoint = os.Getenv("S3_ENDPOINT_URL") + } + if os.Getenv("AWS_ACCESS_KEY_ID") != "" { + s3client.DefaultAccessKey = os.Getenv("AWS_ACCESS_KEY_ID") + } + if os.Getenv("AWS_SECRET_ACCESS_KEY") != "" { + s3client.DefaultSecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + } + s3client.DefaultRegion = BucketRegion custom_testsuites.DefaultRegion = BucketRegion custom_testsuites.IMDSAvailable = IMDSAvailable diff --git a/tests/e2e-kubernetes/file_operations_test_plan.md b/tests/e2e-kubernetes/file_operations_test_plan.md new file mode 100644 index 000000000..f9e033f61 --- /dev/null +++ b/tests/e2e-kubernetes/file_operations_test_plan.md @@ -0,0 +1,298 @@ +# File Operations Test Plan for S3 CSI Driver + +This document outlines the test plan for validating basic file operations with the S3 CSI Driver. + +## Test Objectives + +To verify that the S3 CSI Driver correctly supports all essential file operations when mounting S3 buckets as volumes in Kubernetes pods. + +## Test Categories + +### 1. 
Basic File Operations + +- **File Creation** + - Create files of various sizes (empty, small, medium, large) + - Create files with special characters in names + - Create files with very long names + +- **File Reading** + - Read entire files of different sizes + - Perform partial reads (specific byte ranges) + - Verify content integrity + +- **File Updates** + - Overwrite existing files + - Append data to existing files + - Modify specific portions of files + +- **File Deletion** + - Delete individual files + - Delete multiple files in sequence + - Attempt to delete non-existent files + +### 2. Directory Operations + +- **Directory Creation** + - Create empty directories + - Create nested directory structures + - Create directories with special characters + +- **Directory Listing** + - List empty directories + - List directories with few files + - List directories with many files + - List directory hierarchies + +- **Directory Deletion** + - Delete empty directories + - Delete directories with content + - Delete nested directory structures + +### 3. Metadata and Permissions + +- **File Metadata** + - Check file sizes + - Check file timestamps + - Test extended attributes (if supported) + +- **File Permissions** + - Test read/write permissions + - Test execution permissions (if applicable) + - Test ownership settings + +### 4. Concurrent Operations + +- **Multiple Readers** + - Test multiple pods reading the same file + - Verify data consistency across readers + +- **Multiple Writers** + - Test multiple pods writing to different files in same volume + - Test contention handling for same-file writes (if supported) + +### 5. 
Edge Cases + +- **Path Handling** + - Test absolute vs relative paths + - Test path traversal (../file) + - Test maximum path length + +- **Special Files** + - Test zero-byte files + - Test very large files (multi-GB if supported) + - Test file names with various character sets + +## Implementation Details + +### Test Suite Structure + +The file operations test suite will follow the structure of existing test suites in the codebase: + +```go +type s3CSIFileOperationsTestSuite struct { + tsInfo storageframework.TestSuiteInfo +} + +func InitS3CSIFileOperationsTestSuite() storageframework.TestSuite { + return &s3CSIFileOperationsTestSuite{ + tsInfo: storageframework.TestSuiteInfo{ + Name: "fileoperations", + TestPatterns: []storageframework.TestPattern{ + storageframework.DefaultFsPreprovisionedPV, + }, + }, + } +} + +func (t *s3CSIFileOperationsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { + return t.tsInfo +} + +func (t *s3CSIFileOperationsTestSuite) SkipUnsupportedTests(_ storageframework.TestDriver, _ storageframework.TestPattern) { +} + +func (t *s3CSIFileOperationsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + // Test implementations will go here +} +``` + +### Key Test Implementations + +#### File Creation and Reading + +Similar to the existing `checkWriteToPath` and `checkReadFromPath` functions, we'll implement specific tests for file operations: + +```go +// Generate data with a specific size and seed +data := genBinDataFromSeed(dataSize, seed) +encoded := base64.StdEncoding.EncodeToString(data) + +// Write to a file +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d > %s", encoded, path)) + +// Verify content integrity with SHA256 +sum := sha256.Sum256(data) +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("sha256sum %s | grep -Fq %x", path, sum)) +``` + +#### Directory Operations + +For directory tests, we'll use standard shell commands to create and 
manipulate directories: + +```go +// Create a nested directory structure +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir -p %s/level1/level2/level3", basePath)) + +// Create files in various directories +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'file1' > %s/level1/file1.txt", basePath)) +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'file2' > %s/level1/level2/file2.txt", basePath)) + +// Verify directory structure +checkListingPathWithEntries(f, pod, fmt.Sprintf("%s/level1", basePath), []string{"file1.txt", "level2"}) +``` + +#### File Metadata and Permissions + +Tests for file metadata and permissions will use standard Linux commands like `stat`: + +```go +// Check file permissions +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%a %%g %%u' %s | grep '644 %d %d'", + filePath, defaultNonRootGroup, defaultNonRootUser)) + +// Check file size +e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%s' %s | grep '%d'", + filePath, expectedSize)) +``` + +### Utility Functions + +We'll use a combination of existing utility functions and new ones: + +#### Checking File Operations + +```go +// Check if a file exists +func checkFileExists(f *framework.Framework, pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -f %s", path)) +} + +// Check if a directory exists +func checkDirExists(f *framework.Framework, pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path)) +} + +// Create a file with specific content +func createFileWithContent(f *framework.Framework, pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' > %s", content, path)) +} + +// Append to an existing file +func appendToFile(f *framework.Framework, pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' >> %s", content, path)) +} +``` + +### Concurrent Access 
Testing + +For testing concurrent access, we'll implement a test similar to the existing multivolume test: + +```go +testConcurrentAccess := func(ctx context.Context, pvc *v1.PersistentVolumeClaim, numPods int) { + var pods []*v1.Pod + node := l.config.ClientNodeSelection + + // Create pods + for i := 0; i < numPods; i++ { + pod, err := e2epod.CreatePod(ctx, f.ClientSet, f.Namespace.Name, nil, + []*v1.PersistentVolumeClaim{pvc}, + admissionapi.LevelBaseline, "") + framework.ExpectNoError(err) + pods = append(pods, pod) + } + + // Each pod creates a unique file + for i, pod := range pods { + filePath := fmt.Sprintf("/mnt/volume1/file-%d.txt", i) + content := fmt.Sprintf("Content from pod %d", i) + createFileWithContent(f, pod, filePath, content) + } + + // Each pod verifies all files + for _, pod := range pods { + for i := 0; i < numPods; i++ { + filePath := fmt.Sprintf("/mnt/volume1/file-%d.txt", i) + content := fmt.Sprintf("Content from pod %d", i) + verifyFileContent(f, pod, filePath, content) + } + } +} +``` + +### Performance Considerations + +For performance testing, we'll leverage the existing FIO framework: + +```go +// Example FIO config for large file read test +func largeFileReadTest(f *framework.Framework, pod *v1.Pod, filePath string) { + fioCfg := ` +[global] +name=large_file_read +bs=1M +runtime=30s +time_based +group_reporting +filename=%s + +[sequential_read] +size=1G +rw=read +ioengine=sync +fallocate=none +` + configPath := "/tmp/large_read.fio" + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' > %s", + fmt.Sprintf(fioCfg, filePath), configPath)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("fio %s", configPath)) +} +``` + +## Test Implementation Plan + +1. Create a new test suite file `testsuites/fileoperations.go` +2. Implement the core test suite structure +3. Add common utility functions for file operations +4. 
Implement tests for each category: + - Basic file operations + - Directory operations + - Metadata/permissions tests + - Concurrent access tests + - Edge cases +5. Add the test suite to `e2e_test.go`: + +```go +var CSITestSuites = []func() framework.TestSuite{ + testsuites.InitVolumesTestSuite, + custom_testsuites.InitS3CSIMultiVolumeTestSuite, + custom_testsuites.InitS3MountOptionsTestSuite, + custom_testsuites.InitS3CSICredentialsTestSuite, + custom_testsuites.InitS3CSICacheTestSuite, + custom_testsuites.InitS3CSIFileOperationsTestSuite, // Add new test suite +} +``` + +## Success Criteria + +- All basic file operations work correctly +- File content integrity is maintained +- Directory operations function as expected +- Proper error handling for invalid operations +- Performance meets acceptable thresholds + +## Test Environment Requirements + +- Kubernetes cluster with S3 CSI driver installed +- Access to S3 endpoint +- Sufficient permissions for all operations +- Multiple worker nodes for concurrent testing \ No newline at end of file diff --git a/tests/e2e-kubernetes/fileoperations.go b/tests/e2e-kubernetes/fileoperations.go new file mode 100644 index 000000000..50bc89fc0 --- /dev/null +++ b/tests/e2e-kubernetes/fileoperations.go @@ -0,0 +1,432 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + admissionapi "k8s.io/pod-security-admission/api" + + custom_testsuites "github.com/awslabs/aws-s3-csi-driver/tests/e2e-kubernetes/testsuites" +) + +type s3CSIFileOperationsTestSuite struct { + tsInfo storageframework.TestSuiteInfo +} + +func InitS3CSIFileOperationsTestSuite() storageframework.TestSuite { + return &s3CSIFileOperationsTestSuite{ + tsInfo: storageframework.TestSuiteInfo{ + Name: "fileoperations", + TestPatterns: []storageframework.TestPattern{ + storageframework.DefaultFsPreprovisionedPV, + }, + }, + } +} + +func (t *s3CSIFileOperationsTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo { + return t.tsInfo +} + +func (t *s3CSIFileOperationsTestSuite) SkipUnsupportedTests(_ storageframework.TestDriver, pattern storageframework.TestPattern) { + if pattern.VolType != storageframework.PreprovisionedPV { + framework.Skipf("Suite %q does not support %v", t.tsInfo.Name, pattern.VolType) + } +} + +func (t *s3CSIFileOperationsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { + type local struct { + resources []*storageframework.VolumeResource + config *storageframework.PerTestConfig + } + var ( + l local + ) + + f := framework.NewFrameworkWithCustomTimeouts(custom_testsuites.NamespacePrefix+"fileoperations", storageframework.GetDriverTimeouts(driver)) + f.NamespacePodSecurityLevel = admissionapi.LevelBaseline + + cleanup := func(ctx context.Context) { + var errs []error + for _, resource := range l.resources { + errs = append(errs, resource.CleanupResource(ctx)) + } + 
framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource") + } + + ginkgo.BeforeEach(func(ctx context.Context) { + l = local{} + l.config = driver.PrepareTest(ctx, f) + ginkgo.DeferCleanup(cleanup) + }) + + // Helper functions + checkFileExists := func(pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -f %s", path)) + } + + checkDirExists := func(pod *v1.Pod, path string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path)) + } + + createFileWithContent := func(pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' > %s", content, path)) + } + + appendToFile := func(pod *v1.Pod, path, content string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo '%s' >> %s", content, path)) + } + + verifyFileContent := func(pod *v1.Pod, path, expectedContent string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("grep -q '%s' %s", expectedContent, path)) + } + + verifyFileSize := func(pod *v1.Pod, path string, expectedSize int) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%s' %s | grep '%d'", path, expectedSize)) + } + + verifyFilePermissions := func(pod *v1.Pod, path string, expectedPermissions string) { + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%a' %s | grep '%s'", path, expectedPermissions)) + } + + listDirectory := func(pod *v1.Pod, path string) string { + stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("ls -1 %s", path)) + framework.ExpectNoError(err) + return stdout + } + + checkListingPathWithEntries := func(pod *v1.Pod, path string, entries []string) { + cmd := fmt.Sprintf("ls -1 %s", path) + stdout, stderr, err := e2evolume.PodExec(f, pod, cmd) + framework.ExpectNoError(err, + "%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s", + cmd, err, stdout, stderr) + + // Split output by newlines and remove empty strings + fileList := 
strings.Split(strings.TrimSpace(stdout), "\n") + gomega.Expect(fileList).To(gomega.ConsistOf(entries)) + } + + // Main test functions + testBasicFileOperations := func(ctx context.Context) { + resource := custom_testsuites.CreateVolumeResourceWithMountOptions(ctx, l.config, pattern, []string{"allow-delete"}) + l.resources = append(l.resources, resource) + + ginkgo.By("Creating pod with a volume") + pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{resource.Pvc}, admissionapi.LevelBaseline, "") + var err error + pod, err = custom_testsuites.CreatePod(ctx, f.ClientSet, f.Namespace.Name, pod) + framework.ExpectNoError(err) + defer func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) + }() + + basePath := "/mnt/volume1" + + // Test file creation + ginkgo.By("Creating files of different sizes") + emptyFile := filepath.Join(basePath, "empty.txt") + smallFile := filepath.Join(basePath, "small.txt") + mediumFile := filepath.Join(basePath, "medium.txt") + + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("touch %s", emptyFile)) + createFileWithContent(pod, smallFile, "This is a small file") + + // Create a 100KB file + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=1024 count=100", mediumFile)) + + // Verify files exist + checkFileExists(pod, emptyFile) + checkFileExists(pod, smallFile) + checkFileExists(pod, mediumFile) + + // Verify file sizes + verifyFileSize(pod, emptyFile, 0) + verifyFileContent(pod, smallFile, "This is a small file") + + // Test file with special characters + specialCharFile := filepath.Join(basePath, "special_#$%.txt") + createFileWithContent(pod, specialCharFile, "File with special chars in name") + checkFileExists(pod, specialCharFile) + + // Test long filename (255 chars is max in many filesystems) + longName := strings.Repeat("a", 200) + ".txt" + longNameFile := filepath.Join(basePath, longName) + createFileWithContent(pod, longNameFile, "File with very 
long name") + checkFileExists(pod, longNameFile) + + // Test file updates + ginkgo.By("Updating files") + // Overwrite + createFileWithContent(pod, smallFile, "This file has been overwritten") + verifyFileContent(pod, smallFile, "This file has been overwritten") + + // Append + appendToFile(pod, smallFile, "This text is appended") + verifyFileContent(pod, smallFile, "This text is appended") + + // Test file deletion + ginkgo.By("Deleting files") + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("rm %s", emptyFile)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("rm %s", smallFile)) + + // Verify files are deleted + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("test -f %s", emptyFile), 1) + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("test -f %s", smallFile), 1) + + // Try to delete non-existent file + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("rm %s", filepath.Join(basePath, "nonexistent.txt")), 1) + } + + testDirectoryOperations := func(ctx context.Context) { + resource := custom_testsuites.CreateVolumeResourceWithMountOptions(ctx, l.config, pattern, []string{"allow-delete"}) + l.resources = append(l.resources, resource) + + ginkgo.By("Creating pod with a volume") + pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{resource.Pvc}, admissionapi.LevelBaseline, "") + var err error + pod, err = custom_testsuites.CreatePod(ctx, f.ClientSet, f.Namespace.Name, pod) + framework.ExpectNoError(err) + defer func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) + }() + + basePath := "/mnt/volume1" + + // Test directory creation + ginkgo.By("Creating directories") + emptyDir := filepath.Join(basePath, "empty-dir") + nestedPath := filepath.Join(basePath, "dir1/dir2/dir3") + specialCharDir := filepath.Join(basePath, "special_$dir#") + + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir %s", emptyDir)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir -p %s", nestedPath)) + 
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir %s", specialCharDir)) + + // Verify directories exist + checkDirExists(pod, emptyDir) + checkDirExists(pod, nestedPath) + checkDirExists(pod, specialCharDir) + + // Test directory listing + ginkgo.By("Listing directories") + // List base directory with the created dirs + checkListingPathWithEntries(pod, basePath, []string{"empty-dir", "dir1", "special_$dir#"}) + + // Create files in directories + createFileWithContent(pod, filepath.Join(emptyDir, "file1.txt"), "File 1") + createFileWithContent(pod, filepath.Join(emptyDir, "file2.txt"), "File 2") + createFileWithContent(pod, filepath.Join(nestedPath, "nested-file.txt"), "Nested file") + + // List directory with files + checkListingPathWithEntries(pod, emptyDir, []string{"file1.txt", "file2.txt"}) + + // Test directory deletion + ginkgo.By("Deleting directories") + // Delete empty dir first (after removing its files) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("rm %s/file1.txt %s/file2.txt", emptyDir, emptyDir)) + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("rmdir %s", emptyDir)) + + // Delete non-empty directory with recursive flag + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("rm -rf %s", filepath.Join(basePath, "dir1"))) + + // Verify directories are deleted + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", emptyDir), 1) + e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", filepath.Join(basePath, "dir1")), 1) + } + + testMetadataAndPermissions := func(ctx context.Context) { + resource := custom_testsuites.CreateVolumeResourceWithMountOptions(ctx, l.config, pattern, []string{ + "allow-delete", + "allow-other", + fmt.Sprintf("uid=%d", custom_testsuites.DefaultNonRootUser), + fmt.Sprintf("gid=%d", custom_testsuites.DefaultNonRootGroup), + }) + l.resources = append(l.resources, resource) + + ginkgo.By("Creating pod with a volume") + pod := e2epod.MakePod(f.Namespace.Name, nil, 
[]*v1.PersistentVolumeClaim{resource.Pvc}, admissionapi.LevelBaseline, "") + custom_testsuites.PodModifierNonRoot(pod) + var err error + pod, err = custom_testsuites.CreatePod(ctx, f.ClientSet, f.Namespace.Name, pod) + framework.ExpectNoError(err) + defer func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) + }() + + basePath := "/mnt/volume1" + testFile := filepath.Join(basePath, "permissions-test.txt") + testDir := filepath.Join(basePath, "permissions-test-dir") + + // Create file and directory + createFileWithContent(pod, testFile, "Testing permissions") + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir %s", testDir)) + + // Test file metadata + ginkgo.By("Testing file metadata") + // Check file size + verifyFileSize(pod, testFile, 19) // "Testing permissions" = 19 bytes + + // Check file permissions + ginkgo.By("Testing file permissions") + verifyFilePermissions(pod, testFile, "644") // Default file permissions + verifyFilePermissions(pod, testDir, "755") // Default directory permissions + + // Check ownership + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("stat -c '%%u:%%g' %s | grep '%d:%d'", + testFile, custom_testsuites.DefaultNonRootUser, custom_testsuites.DefaultNonRootGroup)) + + // Change permissions and verify + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("chmod 600 %s", testFile)) + verifyFilePermissions(pod, testFile, "600") + } + + testConcurrentAccess := func(ctx context.Context) { + resource := custom_testsuites.CreateVolumeResourceWithMountOptions(ctx, l.config, pattern, []string{"allow-delete"}) + l.resources = append(l.resources, resource) + + ginkgo.By("Creating multiple pods to access the same volume") + const numPods = 3 + var pods []*v1.Pod + + // Create pods + for i := 0; i < numPods; i++ { + pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{resource.Pvc}, admissionapi.LevelBaseline, "") + pod.Name = fmt.Sprintf("%s-concurrent-%d", f.Namespace.Name, i) + var 
err error + pod, err = custom_testsuites.CreatePod(ctx, f.ClientSet, f.Namespace.Name, pod) + framework.ExpectNoError(err) + defer func(p *v1.Pod) { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, p)) + }(pod) + pods = append(pods, pod) + } + + // Test multiple readers + ginkgo.By("Testing multiple readers") + sharedFile := "/mnt/volume1/shared-file.txt" + + // First pod creates the file + content := "This is a shared file for multiple pods to read" + createFileWithContent(pods[0], sharedFile, content) + + // All pods read the file + for i, pod := range pods { + ginkgo.By(fmt.Sprintf("Pod %d reading shared file", i)) + verifyFileContent(pod, sharedFile, content) + } + + // Test multiple writers to different files + ginkgo.By("Testing multiple writers to different files") + + // Each pod writes to its own file + for i, pod := range pods { + fileName := fmt.Sprintf("/mnt/volume1/pod-%d-file.txt", i) + fileContent := fmt.Sprintf("This file was written by pod %d", i) + createFileWithContent(pod, fileName, fileContent) + } + + // Each pod verifies all files exist and have correct content + for _, pod := range pods { + for i := 0; i < numPods; i++ { + fileName := fmt.Sprintf("/mnt/volume1/pod-%d-file.txt", i) + fileContent := fmt.Sprintf("This file was written by pod %d", i) + verifyFileContent(pod, fileName, fileContent) + } + } + } + + testEdgeCases := func(ctx context.Context) { + resource := custom_testsuites.CreateVolumeResourceWithMountOptions(ctx, l.config, pattern, []string{"allow-delete"}) + l.resources = append(l.resources, resource) + + ginkgo.By("Creating pod with a volume") + pod := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{resource.Pvc}, admissionapi.LevelBaseline, "") + var err error + pod, err = custom_testsuites.CreatePod(ctx, f.ClientSet, f.Namespace.Name, pod) + framework.ExpectNoError(err) + defer func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)) + }() + + basePath := 
"/mnt/volume1" + + // Test path handling + ginkgo.By("Testing path handling") + // Create nested directory structure + nestedDir := filepath.Join(basePath, "path/test/dir") + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("mkdir -p %s", nestedDir)) + + // Test relative paths + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("cd %s && echo 'relative path test' > ./rel-file.txt", basePath)) + checkFileExists(pod, filepath.Join(basePath, "rel-file.txt")) + + // Test path traversal + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("cd %s && echo 'path traversal test' > path/test/../traversal-file.txt", basePath)) + checkFileExists(pod, filepath.Join(basePath, "path/traversal-file.txt")) + + // Test special files + ginkgo.By("Testing special files") + // Zero-byte file + zeroByteFile := filepath.Join(basePath, "zero-byte.txt") + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("touch %s", zeroByteFile)) + verifyFileSize(pod, zeroByteFile, 0) + + // Large file (1MB) + largeFile := filepath.Join(basePath, "large-file.bin") + e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=1M count=1", largeFile)) + checkFileExists(pod, largeFile) + + // File with unusual characters + unusualCharsFile := filepath.Join(basePath, "unusual_チars_файл_αρχείο.txt") + createFileWithContent(pod, unusualCharsFile, "File with Unicode characters in the name") + checkFileExists(pod, unusualCharsFile) + } + + // Define the tests + ginkgo.It("should support basic file operations", func(ctx context.Context) { + testBasicFileOperations(ctx) + }) + + ginkgo.It("should support directory operations", func(ctx context.Context) { + testDirectoryOperations(ctx) + }) + + ginkgo.It("should handle file metadata and permissions", func(ctx context.Context) { + testMetadataAndPermissions(ctx) + }) + + ginkgo.It("should support concurrent access from multiple pods", func(ctx context.Context) { + testConcurrentAccess(ctx) + }) + + ginkgo.It("should handle edge 
cases", func(ctx context.Context) { + testEdgeCases(ctx) + }) +} diff --git a/tests/e2e-kubernetes/go.mod b/tests/e2e-kubernetes/go.mod index 026cb17a5..4ed7d925a 100644 --- a/tests/e2e-kubernetes/go.mod +++ b/tests/e2e-kubernetes/go.mod @@ -5,6 +5,7 @@ go 1.24 require ( github.com/aws/aws-sdk-go-v2 v1.30.3 github.com/aws/aws-sdk-go-v2/config v1.25.12 + github.com/aws/aws-sdk-go-v2/credentials v1.16.10 github.com/aws/aws-sdk-go-v2/service/iam v1.34.3 github.com/aws/aws-sdk-go-v2/service/s3 v1.47.3 github.com/aws/aws-sdk-go-v2/service/sts v1.26.3 @@ -24,7 +25,6 @@ require ( github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.10 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.9 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect diff --git a/tests/e2e-kubernetes/plans/s3_adaptation_plan.md b/tests/e2e-kubernetes/plans/s3_adaptation_plan.md new file mode 100644 index 000000000..df1a6755b --- /dev/null +++ b/tests/e2e-kubernetes/plans/s3_adaptation_plan.md @@ -0,0 +1,162 @@ +# S3 Adaptation Plan for E2E Tests + +This document outlines the plan for adapting the S3 CSI Driver E2E tests to work with S3-compatible storage implementations using static credentials. + +## 1. 
Disable S3 Express Directory Bucket Tests + +### 1.1 Modify Directory Bucket Creation in Test Driver +- In `testdriver.go`, modify the `CreateVolume` method to always use standard buckets: + +```go +func (d *s3Driver) CreateVolume(ctx context.Context, config *framework.PerTestConfig, volumeType framework.TestVolType) framework.TestVolume { + if volumeType != framework.PreprovisionedPV { + f.Failf("Unsupported volType: %v is specified", volumeType) + } + + // Always use standard buckets, even for S3 Express test identifier + bucketName, deleteBucket := d.client.CreateStandardBucket(ctx) + + return &s3Volume{ + bucketName: bucketName, + deleteBucket: deleteBucket, + authenticationSource: custom_testsuites.AuthenticationSourceFromContext(ctx), + } +} +``` + +### 1.2 Add Skip Logic for S3 Express-Specific Test Blocks +- Add skip statements to S3 Express test suites in `testsuites/cache.go`: + +```go +Describe("Express", Serial, func() { + BeforeEach(func() { + Skip("S3 Express tests are disabled") + }) + // ...existing test code... +}) + +Describe("Multi-Level", Serial, func() { + BeforeEach(func() { + Skip("Multi-level cache tests using S3 Express are disabled") + }) + // ...existing test code... +}) +``` + +### 1.3 Skip Individual S3 Express Tests in Other Suites +- Add skip statements to individual tests with S3 Express in their name: + +```go +// In testsuites/mountoptions.go +ginkgo.It("S3 express -- should not be able to access volume as a non-root user", func(ctx context.Context) { + Skip("S3 Express tests are disabled") + // ...existing test code... +}) +``` + +## 2. 
Ensure Consistent AWS Configuration Across All Tests + +### 2.1 Modify `awsConfig` in `testsuites/util.go` +- Update to match existing S3 client configuration, but without the retry mechanism as it's not needed for S3-compatible storage: + +```go +func awsConfig(ctx context.Context) aws.Config { + // Match the existing S3 client configuration + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(DefaultRegion), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + s3client.DefaultAccessKey, + s3client.DefaultSecretKey, + "", + )), + ) + framework.ExpectNoError(err) + + return cfg +} +``` + +### 2.2 Add Helper Function for S3 Client Creation +- Add a helper function to ensure all S3 clients use path style and custom endpoint: + +```go +// Helper function to create properly configured S3 clients +func newS3ClientFromConfig(cfg aws.Config) *s3.Client { + return s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = true + o.BaseEndpoint = aws.String(s3client.DefaultEndpoint) + }) +} +``` + +### 2.3 Update Direct S3 Client Usage +- Update any code that creates S3 clients directly: + +```go +// Find and replace instances like: +client := s3.NewFromConfig(awsConfig(ctx)) + +// With: +client := newS3ClientFromConfig(awsConfig(ctx)) +``` + +## 3. Skip or Adapt AWS IAM/STS Tests + +### 3.1 Skip IAM Role Tests +- In `testsuites/credentials.go`, add skip logic for IRSA tests: + +```go +Context("IAM Roles for Service Accounts (IRSA)", Ordered, func() { + BeforeEach(func(ctx context.Context) { + Skip("These tests rely on AWS IAM/STS services - using static credentials instead") + }) + // ...existing test code... +}) +``` + +### 3.2 Skip IMDS-Dependent Tests +- Skip tests that require Instance Metadata Service: + +```go +It("should automatically detect the STS region if IMDS is available", func(ctx context.Context) { + Skip("This test requires AWS IMDS - using configured region instead") + // ...existing code... +}) +``` + +## 4. 
Keep All Standard S3 Operation Tests Active + +- Maintain all tests that only use basic S3 operations: + - Basic volume mounting tests + - File read/write operations + - Permission verification + - Multi-volume tests + - Mount options tests (except Express-specific ones) + +## 5. Implementation Notes + +### 5.1 Environment Variables +The tests already read the following environment variables: +- `S3_ENDPOINT_URL` - Custom S3 endpoint +- `AWS_ACCESS_KEY_ID` - Access key ID +- `AWS_SECRET_ACCESS_KEY` - Secret access key + +These should be set in the CI environment before running tests. + +### 5.2 Test Execution +The tests will be run using the existing GitHub Actions workflow, which already sets up the environment with: +```yaml +env: + AWS_ACCESS_KEY_ID: "accessKey1" + AWS_SECRET_ACCESS_KEY: "verySecretKey1" + S3_ENDPOINT_URL: +``` + +### 5.3 Expected Outcomes +After these changes: +- All standard S3 functionality tests should pass +- S3 Express tests will be skipped +- AWS IAM/STS integration tests will be skipped +- Any S3 client will use the provided endpoint, region, and credentials + +This adaptation maintains maximum test coverage for the standard S3 functionality while accommodating S3-compatible storage systems. \ No newline at end of file diff --git a/tests/e2e-kubernetes/plans/test_documentation_plan.md b/tests/e2e-kubernetes/plans/test_documentation_plan.md new file mode 100644 index 000000000..68af1fc4f --- /dev/null +++ b/tests/e2e-kubernetes/plans/test_documentation_plan.md @@ -0,0 +1,93 @@ +# E2E Kubernetes Tests Documentation Plan + +## 1. Introduction +- Purpose of the e2e test framework +- What aspects of the S3 CSI driver are being tested +- Test framework architecture overview +- Supported S3 implementations (standard S3, S3 Express) + +## 2. Test Environment Setup +- Prerequisites and required permissions +- Environment variables configuration +- Cluster creation options (kops vs. 
eksctl) +- Driver installation process +- Resource requirements + +## 3. Test Execution Guide +- Step-by-step instructions for running tests +- Available script actions explained (`run.sh` options) +- Required and optional parameters +- Example commands for common use cases +- Local vs CI test execution differences + +## 4. Test Suite Overview +- Active test suites and what they test +- Skipped test suites and why they're skipped +- Custom S3-specific test suites explained +- How test suites interact with S3 buckets +- Interpreting test results + +## 5. Test Scripts Documentation +- Detailed breakdown of scripts in the `scripts` directory +- Purpose and functionality of each script +- Configuration parameters +- Common customizations +- Dependencies between scripts + +## 6. S3 Client Implementation +- Overview of the `s3client` directory +- How it handles different S3 implementations +- Authentication mechanisms +- Configuration options +- Bucket management +- Error handling + +## 7. Test Implementation Details +- Test driver implementation +- Volume creation and mounting workflow +- Test utilities and helper functions +- Test data management +- Test isolation techniques + +## 8. Performance Testing with FIO +- Introduction to the `fio` directory +- FIO configuration and parameters +- How to run performance tests +- Metrics collected +- Interpreting test results +- Customizing performance tests + +## 9. Test Dependency Management +- Understanding go.mod dependencies +- Key dependencies and their purposes +- Updating dependencies +- Managing version conflicts +- Adding new dependencies + +## 10. Troubleshooting +- Common errors and their solutions +- Log collection and analysis +- Debugging techniques +- S3-specific troubleshooting +- Known issues and workarounds + +## 11. 
Extending the Tests +- How to add new test cases +- Adding tests for new S3 implementations +- Creating custom test scenarios +- Reusing test components +- Test helper functions +- Best practices for test development + +## 12. CI Integration +- How tests are run in CI pipelines +- Test environment configuration in CI +- Expected test outputs +- Test reports interpretation +- Common CI issues and solutions + +## 13. Appendix +- Glossary of terms +- References to related documentation +- Diagrams (test flow, architecture) +- Useful links and resources \ No newline at end of file diff --git a/tests/e2e-kubernetes/s3client/client.go b/tests/e2e-kubernetes/s3client/client.go index 1db35f4e5..2bcd4ae81 100644 --- a/tests/e2e-kubernetes/s3client/client.go +++ b/tests/e2e-kubernetes/s3client/client.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" utilrand "k8s.io/apimachinery/pkg/util/rand" @@ -20,6 +21,15 @@ import ( // It is public in order to be modified from test binary which receives region to use as a flag. var DefaultRegion string +// DefaultEndpoint is the default S3 endpoint to use if unspecified. +var DefaultEndpoint string + +// DefaultAccessKey is the default AWS access key to use if unspecified. +var DefaultAccessKey string + +// DefaultSecretKey is the default AWS secret key to use if unspecified. 
+var DefaultSecretKey string + // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints var expressAZs = map[string]string{ "us-east-1": "use1-az4", @@ -49,6 +59,11 @@ func New() *Client { func NewWithRegion(region string) *Client { cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + DefaultAccessKey, + DefaultSecretKey, + "", + )), config.WithRetryer(func() aws.Retryer { return retry.NewStandard(func(opts *retry.StandardOptions) { opts.MaxAttempts = 5 @@ -57,7 +72,10 @@ func NewWithRegion(region string) *Client { }), ) framework.ExpectNoError(err) - return &Client{region: region, client: s3.NewFromConfig(cfg)} + return &Client{region: region, client: s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = true + o.BaseEndpoint = aws.String(DefaultEndpoint) + })} } // CreateStandardBucket creates a new standard S3 bucket with a random name, diff --git a/tests/e2e-kubernetes/testdriver.go b/tests/e2e-kubernetes/testdriver.go index b0ee62d05..a18ea7d02 100644 --- a/tests/e2e-kubernetes/testdriver.go +++ b/tests/e2e-kubernetes/testdriver.go @@ -81,13 +81,8 @@ func (d *s3Driver) CreateVolume(ctx context.Context, config *framework.PerTestCo f.Failf("Unsupported volType: %v is specified", volumeType) } - var bucketName string - var deleteBucket s3client.DeleteBucketFunc - if config.Prefix == custom_testsuites.S3ExpressTestIdentifier { - bucketName, deleteBucket = d.client.CreateDirectoryBucket(ctx) - } else { - bucketName, deleteBucket = d.client.CreateStandardBucket(ctx) - } + // Always use standard buckets, even for S3 Express test identifier + bucketName, deleteBucket := d.client.CreateStandardBucket(ctx) return &s3Volume{ bucketName: bucketName, diff --git a/tests/e2e-kubernetes/testsuites/cache.go b/tests/e2e-kubernetes/testsuites/cache.go index 393a5afaf..d811a88d1 100644 --- 
a/tests/e2e-kubernetes/testsuites/cache.go +++ b/tests/e2e-kubernetes/testsuites/cache.go @@ -288,12 +288,18 @@ func (t *s3CSICacheTestSuite) DefineTests(driver storageframework.TestDriver, pa }) Describe("Express", Serial, func() { + BeforeEach(func() { + Skip("S3 Express tests are disabled") + }) testCache(cacheTestConfig{ useExpressCache: true, }) }) Describe("Multi-Level", Serial, func() { + BeforeEach(func() { + Skip("Multi-level cache tests using S3 Express are disabled") + }) testCache(cacheTestConfig{ useLocalCache: true, useExpressCache: true, @@ -305,7 +311,7 @@ func (t *s3CSICacheTestSuite) DefineTests(driver storageframework.TestDriver, pa // deleteObjectFromS3 deletes an object from given bucket by using S3 SDK. // This is useful to create some side-effects by bypassing Mountpoint. func deleteObjectFromS3(ctx context.Context, bucket string, key string) { - client := s3.NewFromConfig(awsConfig(ctx)) + client := newS3ClientFromConfig(awsConfig(ctx)) _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), diff --git a/tests/e2e-kubernetes/testsuites/credentials.go b/tests/e2e-kubernetes/testsuites/credentials.go index d0cb7466f..957d640b1 100644 --- a/tests/e2e-kubernetes/testsuites/credentials.go +++ b/tests/e2e-kubernetes/testsuites/credentials.go @@ -371,6 +371,7 @@ func (t *s3CSICredentialsTestSuite) DefineTests(driver storageframework.TestDriv Context("IAM Roles for Service Accounts (IRSA)", Ordered, func() { BeforeEach(func(ctx context.Context) { + Skip("These tests rely on AWS IAM/STS services - using static credentials instead") if oidcProvider == "" { Skip("OIDC provider is not configured, skipping IRSA tests") } @@ -437,6 +438,7 @@ func (t *s3CSICredentialsTestSuite) DefineTests(driver storageframework.TestDriv Context("IAM Roles for Service Accounts (IRSA)", Ordered, func() { BeforeEach(func(ctx context.Context) { + Skip("These tests rely on AWS IAM/STS services - using static credentials 
instead") if oidcProvider == "" { Skip("OIDC provider is not configured, skipping IRSA tests") } @@ -607,6 +609,7 @@ func (t *s3CSICredentialsTestSuite) DefineTests(driver storageframework.TestDriv }) It("should automatically detect the STS region if IMDS is available", func(ctx context.Context) { + Skip("This test requires AWS IMDS - using configured region instead") if !IMDSAvailable { Skip("IMDS is not available, skipping test for automatic region detection") } diff --git a/tests/e2e-kubernetes/testsuites/mountoptions.go b/tests/e2e-kubernetes/testsuites/mountoptions.go index a51eccd26..a1b55741f 100644 --- a/tests/e2e-kubernetes/testsuites/mountoptions.go +++ b/tests/e2e-kubernetes/testsuites/mountoptions.go @@ -120,6 +120,7 @@ func (t *s3CSIMountOptionsTestSuite) DefineTests(driver storageframework.TestDri validateWriteToVolume(ctx) }) ginkgo.It("S3 express -- should access volume as a non-root user", func(ctx context.Context) { + ginkgo.Skip("S3 Express tests are disabled") l.config.Prefix = S3ExpressTestIdentifier validateWriteToVolume(ctx) }) @@ -146,6 +147,7 @@ func (t *s3CSIMountOptionsTestSuite) DefineTests(driver storageframework.TestDri accessVolAsNonRootUser(ctx) }) ginkgo.It("S3 express -- should not be able to access volume as a non-root user", func(ctx context.Context) { + ginkgo.Skip("S3 Express tests are disabled") l.config.Prefix = S3ExpressTestIdentifier accessVolAsNonRootUser(ctx) }) diff --git a/tests/e2e-kubernetes/testsuites/util.go b/tests/e2e-kubernetes/testsuites/util.go index d4b449c91..e961a237e 100644 --- a/tests/e2e-kubernetes/testsuites/util.go +++ b/tests/e2e-kubernetes/testsuites/util.go @@ -12,6 +12,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/awslabs/aws-s3-csi-driver/tests/e2e-kubernetes/s3client" "github.com/google/uuid" "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" @@ -266,11 
+269,28 @@ func createServiceAccount(ctx context.Context, f *framework.Framework) (*v1.Serv } func awsConfig(ctx context.Context) aws.Config { - cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(DefaultRegion)) + // Match the existing S3 client configuration + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(DefaultRegion), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + s3client.DefaultAccessKey, + s3client.DefaultSecretKey, + "", + )), + ) framework.ExpectNoError(err) + return cfg + } +// newS3ClientFromConfig creates an S3 client configured with path-style addressing and the default test endpoint. +func newS3ClientFromConfig(cfg aws.Config) *s3.Client { + return s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = true + o.BaseEndpoint = aws.String(s3client.DefaultEndpoint) + }) +} + func waitForKubernetesObject[T any](ctx context.Context, get framework.GetFunc[T]) error { return framework.Gomega().Eventually(ctx, framework.RetryNotFound(get)). WithTimeout(1 * time.Minute).