diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1f401e5..3a515a8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -99,3 +99,23 @@ jobs: subject-digest: ${{ steps.build.outputs.digest }} subject-name: ghcr.io/${{ github.repository }} push-to-registry: true + + - name: Output image info to summary + if: ${{ inputs.push-image }} + run: | + IMAGE_TAG="${{ inputs.version || github.sha }}" + IMAGE_FULL="ghcr.io/${{ github.repository }}:${IMAGE_TAG}" + + echo "## Docker Image" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Image pushed to registry:" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY + echo "${IMAGE_FULL}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Pull command:" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY + echo "docker pull ${IMAGE_FULL}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index eb61d4a..3c86b5d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -33,6 +33,46 @@ jobs: build: uses: ./.github/workflows/docker.yml with: - push-image: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push-image: true secrets: GH_PAT: ${{ secrets.GH_PAT }} + + e2e: + name: E2E Tests + runs-on: ubuntu-latest + needs: [build] + timeout-minutes: 20 + permissions: + contents: read + packages: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Go with private modules + uses: ./.github/actions/setup-go-private + with: + go-version: ${{ env.GO_VERSION }} + gh-token: ${{ secrets.GH_PAT }} + + - name: Login to Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + 
+ - name: Pull operator image + run: docker pull ghcr.io/${{ github.repository }}:${{ github.sha }} + + - name: Install Kind + uses: helm/kind-action@v1 + with: + install_only: true + + - name: Run E2E tests + run: make test-e2e + env: + E2E_SKIP_OPERATOR_BUILD: "true" + E2E_OPERATOR_IMAGE: "ghcr.io/${{ github.repository }}:${{ github.sha }}" diff --git a/Makefile b/Makefile index a043f97..c9c66aa 100644 --- a/Makefile +++ b/Makefile @@ -141,10 +141,20 @@ vet: ## Run go vet against code. test: manifests generate fmt vet envtest ## Run tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out -# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. -.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. -test-e2e: - go test ./test/e2e/ -v -ginkgo.v +# Run the e2e tests using e2e-framework (auto-creates Kind cluster) +# Note: Tests with 'disruptive' label (OperatorCrashRecovery, NoSpuriousUpdatesAfterRestart) +# restart the operator pod and should not run in parallel with other tests. +.PHONY: test-e2e +test-e2e: ## Run all e2e tests against a Kind cluster (auto-created) + go test ./test/e2e/... -v -timeout 20m + +.PHONY: test-e2e-safe +test-e2e-safe: ## Run only parallel-safe e2e tests (excludes operator restart tests) + go test ./test/e2e/... -v -timeout 15m -skip 'TestPodLifecycle_OperatorCrashRecovery|TestPodLifecycle_NoSpuriousUpdatesAfterRestart' + +.PHONY: test-e2e-disruptive +test-e2e-disruptive: ## Run only disruptive e2e tests (operator restart tests) + go test ./test/e2e/... 
-v -timeout 10m -run 'TestPodLifecycle_OperatorCrashRecovery|TestPodLifecycle_NoSpuriousUpdatesAfterRestart' .PHONY: lint lint: golangci-lint ## Run golangci-lint linter diff --git a/api/v1alpha1/annotations.go b/api/v1alpha1/annotations.go new file mode 100644 index 0000000..ab7c3dd --- /dev/null +++ b/api/v1alpha1/annotations.go @@ -0,0 +1,3 @@ +package v1alpha1 + +const ForceRefreshAnnotation = "ui.scality.com/force-refresh" diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5c5f0b8..b455e37 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,2 +1,8 @@ resources: - manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: ui-operator + newTag: e2e diff --git a/go.mod b/go.mod index a778939..fd1d9d4 100644 --- a/go.mod +++ b/go.mod @@ -5,18 +5,21 @@ go 1.24.3 require ( github.com/go-logr/logr v1.4.2 github.com/onsi/ginkgo/v2 v2.22.0 - github.com/onsi/gomega v1.34.2 + github.com/onsi/gomega v1.35.1 github.com/scality/reconciler-framework v0.0.0-20250320235513-ca024f4ffacb - k8s.io/api v0.31.2 - k8s.io/apimachinery v0.31.2 - k8s.io/client-go v0.31.2 - k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 - sigs.k8s.io/controller-runtime v0.19.0 + k8s.io/api v0.32.1 + k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apimachinery v0.32.1 + k8s.io/client-go v0.32.1 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + sigs.k8s.io/controller-runtime v0.20.0 + sigs.k8s.io/e2e-framework v0.6.0 ) require ( + cel.dev/expr v0.18.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/beorn7/perks v1.0.1 // 
indirect @@ -43,22 +46,22 @@ require ( github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.20.1 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jarcoal/httpmock v1.3.1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -67,11 +70,13 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // 
indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oras-project/oras-go v0.1.0 // indirect @@ -84,7 +89,8 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/vladimirvivien/gexe v0.4.1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/otel v1.28.0 // indirect @@ -95,31 +101,31 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/grpc 
v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect - k8s.io/apiserver v0.31.0 // indirect - k8s.io/component-base v0.31.0 // indirect + k8s.io/apiserver v0.32.0 // indirect + k8s.io/component-base v0.32.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9cd3284..73b9809 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= +cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -27,8 +29,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-sdk-for-go 
v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= @@ -79,6 +81,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= @@ -332,8 +336,9 
@@ github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -342,8 +347,8 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -391,8 +396,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= -github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= +github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -438,6 +445,8 @@ github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWS github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -474,8 +483,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -551,6 +558,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= @@ -575,6 +584,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -594,8 +604,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -724,8 +734,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod 
h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -736,14 +746,13 @@ github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRci github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -762,6 +771,8 @@ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:tw github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vladimirvivien/gexe v0.4.1 h1:W9gWkp8vSPjDoXDu04Yp4KljpVMaSt8IQuHswLDd5LY= +github.com/vladimirvivien/gexe v0.4.1/go.mod h1:3gjgTqE2c0VyHnU5UOIwk7gyNzZDGulPb/DJPgcw64E= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -814,8 +825,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -830,8 +841,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -900,15 +911,15 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= 
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -918,8 +929,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 
h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -983,26 +994,26 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1088,10 +1099,10 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= +google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1118,8 +1129,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1153,7 +1164,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1170,30 +1180,30 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= -k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= 
+k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= -k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= -k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= +k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= +k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= -k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod 
h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= -k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -1204,12 +1214,12 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= @@ -1217,16 +1227,18 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.20.0 h1:jjkMo29xEXH+02Md9qaVXfEIaMESSpy3TBWPrsfQkQs= +sigs.k8s.io/controller-runtime v0.20.0/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/e2e-framework v0.6.0 h1:p7hFzHnLKO7eNsWGI2AbC1Mo2IYxidg49BiT4njxkrM= +sigs.k8s.io/e2e-framework v0.6.0/go.mod h1:IREnCHnKgRCioLRmNi0hxSJ1kJ+aAdjEKK/gokcZu4k= +sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/internal/controller/scalityuicomponent/constants.go b/internal/controller/scalityuicomponent/constants.go index d8b4be7..87cb424 100644 --- a/internal/controller/scalityuicomponent/constants.go +++ b/internal/controller/scalityuicomponent/constants.go @@ -4,6 +4,9 @@ const ( // DefaultServicePort is the default port used to connect to the UI component service DefaultServicePort = 80 - // ForceRefreshAnnotation is the annotation key to trigger a force refresh of the configuration - ForceRefreshAnnotation = "ui.scality.com/force-refresh" + // ConditionTypeConfigurationRetrieved is the condition type for configuration retrieval status + ConditionTypeConfigurationRetrieved = "ConfigurationRetrieved" + + // ConditionReasonFetchSucceeded indicates successful configuration fetch + ConditionReasonFetchSucceeded = "FetchSucceeded" ) diff --git a/internal/controller/scalityuicomponent/controller.go b/internal/controller/scalityuicomponent/controller.go index 897600b..5e888db 
100644 --- a/internal/controller/scalityuicomponent/controller.go +++ b/internal/controller/scalityuicomponent/controller.go @@ -322,7 +322,7 @@ func (r *ScalityUIComponentReconciler) processUIComponentConfig(ctx context.Cont // Check for force-refresh annotation if scalityUIComponent.Annotations != nil { - if val, exists := scalityUIComponent.Annotations[ForceRefreshAnnotation]; exists && val == "true" { + if val, exists := scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation]; exists && val == "true" { needsFetch = true reasons = append(reasons, "force-refresh annotation present") } @@ -343,9 +343,8 @@ func (r *ScalityUIComponentReconciler) processUIComponentConfig(ctx context.Cont if err != nil { logger.Error(err, "Failed to fetch micro-app configuration") - // Set ConfigurationRetrieved=False condition meta.SetStatusCondition(&scalityUIComponent.Status.Conditions, metav1.Condition{ - Type: "ConfigurationRetrieved", + Type: ConditionTypeConfigurationRetrieved, Status: metav1.ConditionFalse, Reason: "FetchFailed", Message: fmt.Sprintf("Failed to fetch configuration from image %s: %v", currentImage, err), @@ -372,7 +371,7 @@ func (r *ScalityUIComponentReconciler) parseAndApplyConfig(ctx context.Context, logger.Error(err, "Failed to parse micro-app configuration") meta.SetStatusCondition(&scalityUIComponent.Status.Conditions, metav1.Condition{ - Type: "ConfigurationRetrieved", + Type: ConditionTypeConfigurationRetrieved, Status: metav1.ConditionFalse, Reason: "ParseFailed", Message: fmt.Sprintf("Failed to parse configuration from image %s: %v", currentImage, err), @@ -396,7 +395,7 @@ func (r *ScalityUIComponentReconciler) parseAndApplyConfig(ctx context.Context, logger.Error(err, "Invalid micro-app configuration") meta.SetStatusCondition(&scalityUIComponent.Status.Conditions, metav1.Condition{ - Type: "ConfigurationRetrieved", + Type: ConditionTypeConfigurationRetrieved, Status: metav1.ConditionFalse, Reason: "ValidationFailed", Message: 
fmt.Sprintf("Configuration validation failed for image %s: %v", currentImage, err), @@ -415,6 +414,11 @@ func (r *ScalityUIComponentReconciler) parseAndApplyConfig(ctx context.Context, return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } + existing := meta.FindStatusCondition(scalityUIComponent.Status.Conditions, ConditionTypeConfigurationRetrieved) + conditionIsTrue := existing != nil && + existing.Status == metav1.ConditionTrue && + existing.Reason == ConditionReasonFetchSucceeded + // Check if status actually changed to avoid unnecessary updates statusChanged := false if scalityUIComponent.Status.Kind != config.Metadata.Kind || @@ -423,9 +427,13 @@ func (r *ScalityUIComponentReconciler) parseAndApplyConfig(ctx context.Context, scalityUIComponent.Status.LastFetchedImage != currentImage { statusChanged = true } + if !conditionIsTrue { + statusChanged = true + } if !statusChanged { - logger.V(1).Info("Configuration unchanged, skipping status update") + logger.V(1).Info("Configuration unchanged, clearing force-refresh annotation") + r.removeForceRefreshAnnotation(ctx, scalityUIComponent) return ctrl.Result{}, nil } @@ -448,9 +456,9 @@ func (r *ScalityUIComponentReconciler) parseAndApplyConfig(ctx context.Context, } meta.SetStatusCondition(&scalityUIComponent.Status.Conditions, metav1.Condition{ - Type: "ConfigurationRetrieved", + Type: ConditionTypeConfigurationRetrieved, Status: metav1.ConditionTrue, - Reason: "FetchSucceeded", + Reason: ConditionReasonFetchSucceeded, Message: conditionMessage, }) @@ -480,7 +488,7 @@ func (r *ScalityUIComponentReconciler) removeForceRefreshAnnotation(ctx context. return } - if _, exists := scalityUIComponent.Annotations[ForceRefreshAnnotation]; !exists { + if _, exists := scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation]; !exists { return } @@ -499,11 +507,11 @@ func (r *ScalityUIComponentReconciler) removeForceRefreshAnnotation(ctx context. 
return nil } - if _, exists := fresh.Annotations[ForceRefreshAnnotation]; !exists { + if _, exists := fresh.Annotations[uiv1alpha1.ForceRefreshAnnotation]; !exists { return nil } - delete(fresh.Annotations, ForceRefreshAnnotation) + delete(fresh.Annotations, uiv1alpha1.ForceRefreshAnnotation) return r.Update(ctx, fresh) }) diff --git a/internal/controller/scalityuicomponent/controller_test.go b/internal/controller/scalityuicomponent/controller_test.go index 6be981f..e278eda 100644 --- a/internal/controller/scalityuicomponent/controller_test.go +++ b/internal/controller/scalityuicomponent/controller_test.go @@ -990,7 +990,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { if scalityUIComponent.Annotations == nil { scalityUIComponent.Annotations = make(map[string]string) } - scalityUIComponent.Annotations[ForceRefreshAnnotation] = "true" + scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation] = "true" Expect(k8sClient.Update(ctx, scalityUIComponent)).To(Succeed()) By("Update mock to return different config") @@ -1011,7 +1011,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { By("Verify force-refresh annotation was removed") Expect(k8sClient.Get(ctx, typeNamespacedName, scalityUIComponent)).To(Succeed()) - _, hasAnnotation := scalityUIComponent.Annotations[ForceRefreshAnnotation] + _, hasAnnotation := scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation] Expect(hasAnnotation).To(BeFalse(), "force-refresh annotation should be removed after fetch") By("Fifth reconcile - no fetch (annotation removed, image unchanged)") @@ -1028,7 +1028,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { Name: "test-force-refresh-failure", Namespace: "default", Annotations: map[string]string{ - ForceRefreshAnnotation: "true", + uiv1alpha1.ForceRefreshAnnotation: "true", }, }, Spec: uiv1alpha1.ScalityUIComponentSpec{ @@ -1075,7 +1075,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { By("Verify force-refresh 
annotation was NOT removed on fetch failure (only removed on parse/validation failure)") Expect(k8sClient.Get(ctx, typeNamespacedName, scalityUIComponent)).To(Succeed()) - _, hasAnnotation := scalityUIComponent.Annotations[ForceRefreshAnnotation] + _, hasAnnotation := scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation] Expect(hasAnnotation).To(BeTrue(), "force-refresh annotation should remain on fetch failure for retry") }) @@ -1087,7 +1087,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { Name: "test-force-refresh-parse-fail", Namespace: "default", Annotations: map[string]string{ - ForceRefreshAnnotation: "true", + uiv1alpha1.ForceRefreshAnnotation: "true", }, }, Spec: uiv1alpha1.ScalityUIComponentSpec{ @@ -1132,7 +1132,7 @@ var _ = Describe("ScalityUIComponent Controller", func() { By("Verify force-refresh annotation was removed on parse failure") Expect(k8sClient.Get(ctx, typeNamespacedName, scalityUIComponent)).To(Succeed()) - _, hasAnnotation := scalityUIComponent.Annotations[ForceRefreshAnnotation] + _, hasAnnotation := scalityUIComponent.Annotations[uiv1alpha1.ForceRefreshAnnotation] Expect(hasAnnotation).To(BeFalse(), "force-refresh annotation should be removed on parse failure") }) }) diff --git a/internal/controller/scalityuicomponentexposer/controller.go b/internal/controller/scalityuicomponentexposer/controller.go index c1a4686..9faed42 100644 --- a/internal/controller/scalityuicomponentexposer/controller.go +++ b/internal/controller/scalityuicomponentexposer/controller.go @@ -61,10 +61,6 @@ func NewScalityUIComponentExposerReconcilerForTest(client client.Client, scheme func (r *ScalityUIComponentExposerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := log.FromContext(ctx) - currentState := newReconcileContextWithCtx(ctx) - currentState.SetLog(log) - currentState.SetKubeClient(r.Client) - cr := &uiv1alpha1.ScalityUIComponentExposer{} err := r.Client.Get(ctx, req.NamespacedName, cr) if 
err != nil { @@ -74,6 +70,27 @@ func (r *ScalityUIComponentExposerReconciler) Reconcile(ctx context.Context, req return reconcile.Result{}, err } + // Handle deletion + if isBeingDeleted(cr) { + return r.handleDeletion(ctx, cr, log) + } + + // Ensure finalizer is present for cleanup on deletion + finalizerAdded, err := r.ensureFinalizer(ctx, cr, log) + if err != nil { + return reconcile.Result{}, err + } + + // Re-fetch the CR only if finalizer was added (object was modified) + if finalizerAdded { + if err := r.Client.Get(ctx, req.NamespacedName, cr); err != nil { + return reconcile.Result{}, err + } + } + + currentState := newReconcileContextWithCtx(ctx) + currentState.SetLog(log) + currentState.SetKubeClient(r.Client) currentState.SetOldStatus(cr.Status.DeepCopy()) resourceReconcilers := buildReducerList(r, cr, currentState) diff --git a/internal/controller/scalityuicomponentexposer/deletion.go b/internal/controller/scalityuicomponentexposer/deletion.go new file mode 100644 index 0000000..0ec0ec0 --- /dev/null +++ b/internal/controller/scalityuicomponentexposer/deletion.go @@ -0,0 +1,218 @@ +package scalityuicomponentexposer + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// handleDeletion handles the cleanup when an Exposer is being deleted. +// It removes the volume from the component deployment, cleans up the ConfigMap, +// and removes the finalizer from the Exposer. 
+func (r *ScalityUIComponentExposerReconciler) handleDeletion( + ctx context.Context, + exposer *uiv1alpha1.ScalityUIComponentExposer, + log logr.Logger, +) (ctrl.Result, error) { + log.Info("Handling deletion for ScalityUIComponentExposer") + + // Get the component name from spec (we need this even if component doesn't exist anymore) + componentName := exposer.Spec.ScalityUIComponent + namespace := exposer.Namespace + + // Step 1: Remove volume and volumeMount from component deployment + if err := r.removeVolumeFromDeployment(ctx, namespace, componentName, log); err != nil { + log.Error(err, "Failed to remove volume from deployment") + return ctrl.Result{}, err + } + + // Step 2: Clean up ConfigMap (remove data entry and finalizer) + if err := r.cleanupConfigMap(ctx, namespace, componentName, exposer.Name, log); err != nil { + log.Error(err, "Failed to cleanup ConfigMap") + return ctrl.Result{}, err + } + + // Step 3: Remove finalizer from Exposer + if controllerutil.ContainsFinalizer(exposer, exposerFinalizer) { + controllerutil.RemoveFinalizer(exposer, exposerFinalizer) + if err := r.Client.Update(ctx, exposer); err != nil { + log.Error(err, "Failed to remove finalizer from Exposer") + return ctrl.Result{}, err + } + log.Info("Removed finalizer from Exposer") + } + + log.Info("Deletion cleanup completed successfully") + return ctrl.Result{}, nil +} + +// removeVolumeFromDeployment removes the config volume and volumeMount from the component deployment +func (r *ScalityUIComponentExposerReconciler) removeVolumeFromDeployment( + ctx context.Context, + namespace, componentName string, + log logr.Logger, +) error { + deployment := &appsv1.Deployment{} + err := r.Client.Get(ctx, types.NamespacedName{ + Name: componentName, + Namespace: namespace, + }, deployment) + + if err != nil { + if errors.IsNotFound(err) { + log.Info("Component deployment not found, skipping volume removal", "deployment", componentName) + return nil + } + return fmt.Errorf("failed to get 
deployment: %w", err) + } + + volumeName := volumeNamePrefix + componentName + volumeRemoved := false + mountRemoved := false + + // Remove the volume + newVolumes := make([]corev1.Volume, 0, len(deployment.Spec.Template.Spec.Volumes)) + for _, vol := range deployment.Spec.Template.Spec.Volumes { + if vol.Name == volumeName { + volumeRemoved = true + continue + } + newVolumes = append(newVolumes, vol) + } + deployment.Spec.Template.Spec.Volumes = newVolumes + + // Remove the volumeMount from all containers + for i := range deployment.Spec.Template.Spec.Containers { + container := &deployment.Spec.Template.Spec.Containers[i] + newMounts := make([]corev1.VolumeMount, 0, len(container.VolumeMounts)) + for _, mount := range container.VolumeMounts { + if mount.Name == volumeName { + mountRemoved = true + continue + } + newMounts = append(newMounts, mount) + } + container.VolumeMounts = newMounts + } + + if !volumeRemoved && !mountRemoved { + log.Info("Volume and mount already removed from deployment", "deployment", componentName) + return nil + } + + // Update the hash annotation to trigger a rolling update. + // We use "removed-" prefix (instead of a hash) to clearly indicate this is a cleanup operation, + // distinguishing it from normal config updates which use "hash-timestamp" format. 
+ if deployment.Spec.Template.Annotations == nil { + deployment.Spec.Template.Annotations = make(map[string]string) + } + timestamp := time.Now().Format(timestampFormat) + deployment.Spec.Template.Annotations[configHashAnnotation] = "removed-" + timestamp + + if err := r.Client.Update(ctx, deployment); err != nil { + return fmt.Errorf("failed to update deployment after volume removal: %w", err) + } + + log.Info("Removed volume and volumeMount from deployment", + "deployment", componentName, "volume", volumeName) + return nil +} + +// cleanupConfigMap removes the exposer's data entry and finalizer from the ConfigMap +func (r *ScalityUIComponentExposerReconciler) cleanupConfigMap( + ctx context.Context, + namespace, componentName, exposerName string, + log logr.Logger, +) error { + configMapName := fmt.Sprintf("%s-%s", componentName, configMapNameSuffix) + configMap := &corev1.ConfigMap{} + + err := r.Client.Get(ctx, types.NamespacedName{ + Name: configMapName, + Namespace: namespace, + }, configMap) + + if err != nil { + if errors.IsNotFound(err) { + log.Info("ConfigMap not found, skipping cleanup", "configMap", configMapName) + return nil + } + return fmt.Errorf("failed to get ConfigMap: %w", err) + } + + finalizerName := configMapFinalizerPrefix + exposerName + dataKeyRemoved := false + finalizerRemoved := false + + // Remove the exposer's data entry from ConfigMap + if configMap.Data != nil { + if _, exists := configMap.Data[exposerName]; exists { + delete(configMap.Data, exposerName) + dataKeyRemoved = true + } + } + + // Remove the exposer's finalizer from ConfigMap + if controllerutil.ContainsFinalizer(configMap, finalizerName) { + controllerutil.RemoveFinalizer(configMap, finalizerName) + finalizerRemoved = true + } + + if !dataKeyRemoved && !finalizerRemoved { + log.Info("ConfigMap already cleaned up", "configMap", configMapName) + return nil + } + + // Always update first to remove the finalizer and data entry + if err := r.Client.Update(ctx, configMap); 
err != nil { + return fmt.Errorf("failed to update ConfigMap: %w", err) + } + log.Info("Cleaned up ConfigMap entry and finalizer", + "configMap", configMapName, "exposer", exposerName, + "remainingEntries", len(configMap.Data), "remainingFinalizers", len(configMap.Finalizers)) + + // Check if ConfigMap should be deleted (no more data entries and no more finalizers) + if len(configMap.Data) == 0 && len(configMap.Finalizers) == 0 { + // Delete the ConfigMap since it's empty and has no finalizers + if err := r.Client.Delete(ctx, configMap); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to delete empty ConfigMap: %w", err) + } + } + log.Info("Deleted empty ConfigMap", "configMap", configMapName) + } + + return nil +} + +// ensureFinalizer adds the exposer finalizer if it's not already present. +// Returns true if the finalizer was added (and the object was updated). +func (r *ScalityUIComponentExposerReconciler) ensureFinalizer( + ctx context.Context, + exposer *uiv1alpha1.ScalityUIComponentExposer, + log logr.Logger, +) (bool, error) { + if !controllerutil.ContainsFinalizer(exposer, exposerFinalizer) { + controllerutil.AddFinalizer(exposer, exposerFinalizer) + if err := r.Client.Update(ctx, exposer); err != nil { + return false, fmt.Errorf("failed to add finalizer: %w", err) + } + log.Info("Added finalizer to Exposer") + return true, nil + } + return false, nil +} + +// isBeingDeleted checks if the Exposer is being deleted +func isBeingDeleted(exposer *uiv1alpha1.ScalityUIComponentExposer) bool { + return !exposer.DeletionTimestamp.IsZero() +} diff --git a/test/e2e/Dockerfile.e2e b/test/e2e/Dockerfile.e2e new file mode 100644 index 0000000..744fc5f --- /dev/null +++ b/test/e2e/Dockerfile.e2e @@ -0,0 +1,8 @@ +# Simplified Dockerfile for local e2e testing +# In CI, the production image from ghcr.io is used instead +# Assumes binary is pre-built locally for the correct architecture +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY manager 
. +USER 65532:65532 +ENTRYPOINT ["/manager"] diff --git a/test/e2e/cascade_gc_test.go b/test/e2e/cascade_gc_test.go new file mode 100644 index 0000000..9ea845c --- /dev/null +++ b/test/e2e/cascade_gc_test.go @@ -0,0 +1,392 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "testing" + + "github.com/scality/ui-operator/test/e2e/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +const ( + configMapFinalizerPrefix = "uicomponentexposer.scality.com/" + configsSubdirectory = "configs" + defaultMountPath = "/app/config" +) + +type cascadeGCContextKey string + +const ( + cascadeGCNamespaceKey cascadeGCContextKey = "cascade-gc-namespace" + cascadeGCScalityUIKey cascadeGCContextKey = "cascade-gc-scalityui" + cascadeGCComponentKey cascadeGCContextKey = "cascade-gc-component" + cascadeGCExposerKey cascadeGCContextKey = "cascade-gc-exposer" +) + +func TestCascadeGC_ExposerUpdatesComponent(t *testing.T) { + feature := features.New("exposer-updates-component"). 
+ Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + testNamespace := envconf.RandomName("cascade-update", 16) + scalityUIName := envconf.RandomName("cascade-update-ui", 24) + componentName := envconf.RandomName("cascade-update-comp", 24) + exposerName := envconf.RandomName("cascade-update-exp", 24) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace: %v", err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, cascadeGCNamespaceKey, testNamespace) + ctx = context.WithValue(ctx, cascadeGCScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, cascadeGCComponentKey, componentName) + ctx = context.WithValue(ctx, cascadeGCExposerKey, exposerName) + return ctx + }). + Assess("create ScalityUI and Component without exposer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + scalityUIName := ctx.Value(cascadeGCScalityUIKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("Cascade Update Test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + t.Logf("Created ScalityUI %s", scalityUIName) + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + + if err := framework.NewScalityUIComponentBuilder(componentName, namespace). + WithImage(framework.MockServerImage). + WithMountPath(defaultMountPath). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent: %v", err) + } + t.Logf("Created ScalityUIComponent %s", componentName) + + if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component deployment not ready: %v", err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component not configured: %v", err) + } + t.Logf("Component ready and configured (no exposer yet)") + + return ctx + }). + Assess("verify no config volume before exposer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + volumeName := framework.ConfigVolumePrefix + componentName + err := framework.WaitForDeploymentNoVolume(ctx, client, namespace, componentName, volumeName, framework.DefaultTimeout) + if err != nil { + t.Fatalf("Expected no config volume before exposer: %v", err) + } + t.Logf("Verified: No config volume %s exists before exposer", volumeName) + + return ctx + }). + Assess("record initial ReplicaSet", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + activeRS, err := framework.WaitForDeploymentStable(ctx, client, namespace, componentName, framework.LongTimeout) + if err != nil { + t.Fatalf("Deployment not stable: %v", err) + } + t.Logf("Initial ReplicaSet: %s", activeRS.Name) + + ctx = context.WithValue(ctx, cascadeGCContextKey("initial-rs-name"), activeRS.Name) + return ctx + }). 
+ Assess("create exposer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + scalityUIName := ctx.Value(cascadeGCScalityUIKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + exposerName := ctx.Value(cascadeGCExposerKey).(string) + + if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace). + WithScalityUI(scalityUIName). + WithScalityUIComponent(componentName). + WithAppHistoryBasePath("/cascade-test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer: %v", err) + } + t.Logf("Created ScalityUIComponentExposer %s", exposerName) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil { + t.Fatalf("Exposer not ready: %v", err) + } + t.Logf("Exposer is ready") + + return ctx + }). + Assess("verify volume added to deployment", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + volumeName := framework.ConfigVolumePrefix + componentName + if err := framework.WaitForDeploymentHasVolume(ctx, client, namespace, componentName, volumeName, framework.LongTimeout); err != nil { + t.Fatalf("Volume not added to deployment: %v", err) + } + t.Logf("Verified: Volume %s added to deployment", volumeName) + + expectedMountPath := defaultMountPath + "/" + configsSubdirectory + if err := framework.WaitForDeploymentHasVolumeMount(ctx, client, namespace, componentName, volumeName, expectedMountPath, framework.LongTimeout); err != nil { + t.Fatalf("VolumeMount not added: %v", err) + } + t.Logf("Verified: VolumeMount %s -> %s added", volumeName, expectedMountPath) + + return ctx + }). 
+ Assess("verify new ReplicaSet created (rolling update)", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + initialRSName := ctx.Value(cascadeGCContextKey("initial-rs-name")).(string) + + newRSName, err := framework.WaitForNewReplicaSet(ctx, client, namespace, componentName, []string{initialRSName}, framework.LongTimeout) + if err != nil { + t.Fatalf("New ReplicaSet not created: %v", err) + } + t.Logf("Verified: New ReplicaSet created: %s (initial was %s)", newRSName, initialRSName) + + if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Deployment not ready after rolling update: %v", err) + } + t.Logf("Deployment ready after rolling update") + + return ctx + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + scalityUIName := ctx.Value(cascadeGCScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI: %v", err) + } + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace: %v", err) + } else { + t.Logf("Deleted namespace %s", namespace) + } + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} + +func TestCascadeGC_ExposerDeletionCleanup(t *testing.T) { + feature := features.New("exposer-deletion-cleanup"). 
+ Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + testNamespace := envconf.RandomName("cascade-delete", 16) + scalityUIName := envconf.RandomName("cascade-delete-ui", 24) + componentName := envconf.RandomName("cascade-delete-comp", 24) + exposerName := envconf.RandomName("cascade-delete-exp", 24) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace: %v", err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, cascadeGCNamespaceKey, testNamespace) + ctx = context.WithValue(ctx, cascadeGCScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, cascadeGCComponentKey, componentName) + ctx = context.WithValue(ctx, cascadeGCExposerKey, exposerName) + return ctx + }). + Assess("create full resource chain", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + scalityUIName := ctx.Value(cascadeGCScalityUIKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + exposerName := ctx.Value(cascadeGCExposerKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("Cascade Delete Test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + t.Logf("ScalityUI ready") + + if err := framework.NewScalityUIComponentBuilder(componentName, namespace). + WithImage(framework.MockServerImage). + WithMountPath(defaultMountPath). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create component: %v", err) + } + + if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component deployment not ready: %v", err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component not configured: %v", err) + } + t.Logf("Component ready") + + if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace). + WithScalityUI(scalityUIName). + WithScalityUIComponent(componentName). + WithAppHistoryBasePath("/cascade-delete"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create exposer: %v", err) + } + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil { + t.Fatalf("Exposer not ready: %v", err) + } + t.Logf("Full chain established") + + return ctx + }). + Assess("wait for stable state with volume mounted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + volumeName := framework.ConfigVolumePrefix + componentName + if err := framework.WaitForDeploymentHasVolume(ctx, client, namespace, componentName, volumeName, framework.LongTimeout); err != nil { + t.Fatalf("Volume not mounted: %v", err) + } + + if _, err := framework.WaitForDeploymentStable(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Deployment not stable: %v", err) + } + t.Logf("Deployment stable with volume mounted") + + return ctx + }). 
+ Assess("verify ConfigMap exists with finalizer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + exposerName := ctx.Value(cascadeGCExposerKey).(string) + + configMapName := componentName + framework.RuntimeConfigMapSuffix + if err := framework.WaitForConfigMapExists(ctx, client, namespace, configMapName, framework.DefaultTimeout); err != nil { + t.Fatalf("ConfigMap not found: %v", err) + } + t.Logf("ConfigMap %s exists", configMapName) + + finalizer := configMapFinalizerPrefix + exposerName + if err := framework.WaitForConfigMapHasFinalizer(ctx, client, namespace, configMapName, finalizer, framework.DefaultTimeout); err != nil { + t.Fatalf("ConfigMap missing finalizer: %v", err) + } + t.Logf("ConfigMap has finalizer %s", finalizer) + + return ctx + }). + Assess("delete exposer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + exposerName := ctx.Value(cascadeGCExposerKey).(string) + + if err := framework.DeleteScalityUIComponentExposer(ctx, client, namespace, exposerName); err != nil { + t.Fatalf("Failed to delete exposer: %v", err) + } + t.Logf("Deleted exposer %s", exposerName) + + return ctx + }). 
+ Assess("verify volume removed from deployment", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + + volumeName := framework.ConfigVolumePrefix + componentName + if err := framework.WaitForDeploymentNoVolume(ctx, client, namespace, componentName, volumeName, framework.LongTimeout); err != nil { + t.Fatalf("Volume not removed: %v", err) + } + t.Logf("Verified: Volume %s removed from deployment", volumeName) + + return ctx + }). + Assess("verify ConfigMap finalizer removed and ConfigMap deleted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + componentName := ctx.Value(cascadeGCComponentKey).(string) + exposerName := ctx.Value(cascadeGCExposerKey).(string) + + configMapName := componentName + framework.RuntimeConfigMapSuffix + finalizer := configMapFinalizerPrefix + exposerName + + if err := framework.WaitForConfigMapNoFinalizer(ctx, client, namespace, configMapName, finalizer, framework.LongTimeout); err != nil { + t.Fatalf("Finalizer not removed: %v", err) + } + t.Logf("Verified: Finalizer %s removed", finalizer) + + if err := framework.WaitForConfigMapDeleted(ctx, client, namespace, configMapName, framework.LongTimeout); err != nil { + t.Fatalf("ConfigMap not deleted by GC: %v", err) + } + t.Logf("Verified: ConfigMap %s deleted by GC", configMapName) + + return ctx + }). 
+ Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(cascadeGCNamespaceKey).(string) + scalityUIName := ctx.Value(cascadeGCScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI: %v", err) + } + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace: %v", err) + } else { + t.Logf("Deleted namespace %s", namespace) + } + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index bc2f19b..1f41b61 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -17,16 +17,35 @@ limitations under the License. package e2e import ( - "fmt" + "os" "testing" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/envfuncs" + "sigs.k8s.io/e2e-framework/support/kind" + + "github.com/scality/ui-operator/test/e2e/framework" ) -// Run e2e tests using the Ginkgo runner. 
-func TestE2E(t *testing.T) { - RegisterFailHandler(Fail) - _, _ = fmt.Fprintf(GinkgoWriter, "Starting ui-operator suite\n") - RunSpecs(t, "e2e suite") +var testenv env.Environment + +func TestMain(m *testing.M) { + cfg := envconf.New().WithParallelTestEnabled() + testenv = env.NewWithConfig(cfg) + kindClusterName := envconf.RandomName("ui-operator-e2e", 16) + + testenv.Setup( + envfuncs.CreateCluster(kind.NewProvider(), kindClusterName), + framework.SetupScheme(), + framework.BuildAndLoadMockServerSetup(kindClusterName), + framework.DeployOperatorSetup(kindClusterName), + ) + + testenv.Finish( + framework.UndeployOperatorTeardown(), + envfuncs.DestroyCluster(kindClusterName), + ) + + os.Exit(testenv.Run(m)) } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 725ffe4..2fd3b4c 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,106 +17,263 @@ limitations under the License. package e2e import ( - "fmt" - "os/exec" - "time" + "context" + "testing" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" - "github.com/scality/ui-operator/test/utils" + "github.com/scality/ui-operator/test/e2e/framework" ) -const namespace = "ui-operator-system" - -var _ = Describe("controller", Ordered, func() { - BeforeAll(func() { - By("installing prometheus operator") - Expect(utils.InstallPrometheusOperator()).To(Succeed()) - - By("installing the cert-manager") - Expect(utils.InstallCertManager()).To(Succeed()) - - By("creating manager namespace") - cmd := exec.Command("kubectl", "create", "ns", namespace) - _, _ = utils.Run(cmd) - }) - - AfterAll(func() { - By("uninstalling the Prometheus manager bundle") - utils.UninstallPrometheusOperator() - - By("uninstalling the cert-manager bundle") - utils.UninstallCertManager() - - By("removing manager namespace") - cmd := exec.Command("kubectl", "delete", "ns", namespace) - _, _ = utils.Run(cmd) - }) - - Context("Operator", func() { - It("should run successfully", func() { - var controllerPodName string - var err error - - // projectimage stores the name of the image used in the example - var projectimage = "example.com/ui-operator:v0.0.1" - - By("building the manager(Operator) image") - cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("loading the the manager(Operator) image on Kind") - err = utils.LoadImageToKindClusterWithName(projectimage) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("installing CRDs") - cmd = exec.Command("make", "install") - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("deploying the controller-manager") - cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("validating 
that the controller-manager pod is running as expected") - verifyControllerUp := func() error { - // Get pod name - - cmd = exec.Command("kubectl", "get", - "pods", "-l", "control-plane=controller-manager", - "-o", "go-template={{ range .items }}"+ - "{{ if not .metadata.deletionTimestamp }}"+ - "{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", namespace, - ) - - podOutput, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - podNames := utils.GetNonEmptyLines(string(podOutput)) - if len(podNames) != 1 { - return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) - } - controllerPodName = podNames[0] - ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) - - // Validate pod status - cmd = exec.Command("kubectl", "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", namespace, - ) - status, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - if string(status) != "Running" { - return fmt.Errorf("controller pod in %s status", status) +func TestCRDInstallation(t *testing.T) { + expectedCRDs := []string{ + "scalityuis.ui.scality.com", + "scalityuicomponents.ui.scality.com", + "scalityuicomponentexposers.ui.scality.com", + } + + feature := features.New("crd-installation"). + Assess("all CRDs are registered and established", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + for _, crdName := range expectedCRDs { + if err := framework.WaitForCRDEstablished(ctx, client, crdName); err != nil { + t.Fatalf("CRD %s not established within timeout: %v", crdName, err) } - return nil + t.Logf("✓ CRD %s is registered and established", crdName) + } + + return ctx + }). 
+ Feature() + + testenv.Test(t, feature) +} + +func TestOperatorDeployment(t *testing.T) { + if framework.SkipOperatorDeploy() { + t.Skip("Skipping operator deployment test (E2E_SKIP_OPERATOR=true)") + } + + feature := features.New("operator-deployment"). + Assess("controller-manager deployment is ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + err := framework.WaitForDeploymentReady( + ctx, + client, + framework.OperatorNamespace, + framework.OperatorDeployment, + framework.LongTimeout, + ) + if err != nil { + t.Fatalf("Operator deployment not ready: %v", err) + } + t.Logf("✓ Deployment %s/%s is ready", framework.OperatorNamespace, framework.OperatorDeployment) + + return ctx + }). + Assess("controller-manager pod is running", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + pod, err := framework.WaitForPodRunning( + ctx, + client, + framework.OperatorNamespace, + framework.ControlPlaneLabel, + framework.ControlPlaneValue, + framework.LongTimeout, + ) + if err != nil { + t.Fatalf("Operator pod not running: %v", err) + } + t.Logf("✓ Pod %s is running", pod.Name) + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} + +type smokeTestContextKey string + +const ( + smokeNamespaceKey smokeTestContextKey = "smoke-namespace" + smokeScalityUIKey smokeTestContextKey = "smoke-scalityui" + smokeComponentKey smokeTestContextKey = "smoke-component" + smokeExposerKey smokeTestContextKey = "smoke-exposer" +) + +func TestSmokeFullChain(t *testing.T) { + // Expected values from mock-server's defaultMicroAppConfig() + const ( + expectedPublicPath = "/mock/" + expectedKind = "shell" + expectedVersion = "1.0.0" + ) + + feature := features.New("smoke-full-chain"). 
+ Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + testNamespace := envconf.RandomName("e2e-smoke", 16) + scalityUIName := envconf.RandomName("e2e-smoke-ui", 24) + componentName := envconf.RandomName("e2e-smoke-comp", 24) + exposerName := envconf.RandomName("e2e-smoke-exp", 24) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace %s: %v", testNamespace, err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, smokeNamespaceKey, testNamespace) + ctx = context.WithValue(ctx, smokeScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, smokeComponentKey, componentName) + ctx = context.WithValue(ctx, smokeExposerKey, exposerName) + return ctx + }). + Assess("create ScalityUI", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(smokeScalityUIKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("E2E Smoke Test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + t.Logf("Created ScalityUI %s", scalityUIName) + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + t.Logf("ScalityUI is ready") + + return ctx + }). + Assess("create ScalityUIComponent", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(smokeNamespaceKey).(string) + componentName := ctx.Value(smokeComponentKey).(string) + + if err := framework.NewScalityUIComponentBuilder(componentName, namespace). + WithImage(framework.MockServerImage). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent: %v", err) } - EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + t.Logf("Created ScalityUIComponent %s", componentName) + + if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUIComponent deployment not ready: %v", err) + } + t.Logf("ScalityUIComponent deployment is ready") + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUIComponent not configured: %v", err) + } + t.Logf("ScalityUIComponent configuration retrieved") + + return ctx + }). + Assess("create ScalityUIComponentExposer", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(smokeNamespaceKey).(string) + scalityUIName := ctx.Value(smokeScalityUIKey).(string) + componentName := ctx.Value(smokeComponentKey).(string) + exposerName := ctx.Value(smokeExposerKey).(string) + + if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace). + WithScalityUI(scalityUIName). + WithScalityUIComponent(componentName). + WithAppHistoryBasePath("/mock"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponentExposer: %v", err) + } + t.Logf("Created ScalityUIComponentExposer %s", exposerName) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUIComponentExposer not ready: %v", err) + } + t.Logf("ScalityUIComponentExposer is ready") + + return ctx + }). 
+ Assess("verify all resources created correctly", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(smokeNamespaceKey).(string) + componentName := ctx.Value(smokeComponentKey).(string) + + if err := framework.WaitForServiceExists(ctx, client, namespace, componentName, framework.DefaultTimeout); err != nil { + t.Fatalf("Component Service not found: %v", err) + } + t.Logf("Component Service exists") + + runtimeConfigMapName := componentName + framework.RuntimeConfigMapSuffix + if err := framework.WaitForConfigMapExists(ctx, client, namespace, runtimeConfigMapName, framework.DefaultTimeout); err != nil { + t.Fatalf("Runtime ConfigMap not found: %v", err) + } + t.Logf("Runtime ConfigMap exists") + + component, err := framework.GetScalityUIComponent(ctx, client, namespace, componentName) + if err != nil { + t.Fatalf("Failed to get ScalityUIComponent: %v", err) + } + if component.Status.PublicPath != expectedPublicPath { + t.Fatalf("Expected PublicPath %q, got %q", expectedPublicPath, component.Status.PublicPath) + } + t.Logf("ScalityUIComponent.Status.PublicPath = %s", component.Status.PublicPath) + + if component.Status.Kind != expectedKind { + t.Fatalf("Expected Kind %q, got %q", expectedKind, component.Status.Kind) + } + t.Logf("ScalityUIComponent.Status.Kind = %s", component.Status.Kind) + + if component.Status.Version != expectedVersion { + t.Fatalf("Expected Version %q, got %q", expectedVersion, component.Status.Version) + } + t.Logf("ScalityUIComponent.Status.Version = %s", component.Status.Version) + + configVolumeName := framework.ConfigVolumePrefix + componentName + if err := framework.WaitForDeploymentHasVolume(ctx, client, namespace, componentName, configVolumeName, framework.DefaultTimeout); err != nil { + t.Fatalf("Component Deployment does not have config volume: %v", err) + } + t.Logf("Component Deployment has config volume mounted") + + return ctx + }). 
+ Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(smokeNamespaceKey).(string) + scalityUIName := ctx.Value(smokeScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI %s: %v", scalityUIName, err) + } else { + t.Logf("Deleted ScalityUI %s", scalityUIName) + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace %s: %v", namespace, err) + } else { + t.Logf("Deleted namespace %s", namespace) + } + + return ctx + }). + Feature() - }) - }) -}) + testenv.Test(t, feature) +} diff --git a/test/e2e/framework/builders.go b/test/e2e/framework/builders.go new file mode 100644 index 0000000..edb4ca5 --- /dev/null +++ b/test/e2e/framework/builders.go @@ -0,0 +1,220 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "context" + + uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient" +) + +type ScalityUIComponentBuilder struct { + name string + namespace string + image string + mountPath string + labels map[string]string +} + +func NewScalityUIComponentBuilder(name, namespace string) *ScalityUIComponentBuilder { + return &ScalityUIComponentBuilder{ + name: name, + namespace: namespace, + mountPath: "/app/config", + labels: make(map[string]string), + } +} + +func (b *ScalityUIComponentBuilder) WithImage(image string) *ScalityUIComponentBuilder { + b.image = image + return b +} + +func (b *ScalityUIComponentBuilder) WithMountPath(mountPath string) *ScalityUIComponentBuilder { + b.mountPath = mountPath + return b +} + +func (b *ScalityUIComponentBuilder) WithLabel(key, value string) *ScalityUIComponentBuilder { + b.labels[key] = value + return b +} + +func (b *ScalityUIComponentBuilder) Build() *uiv1alpha1.ScalityUIComponent { + component := &uiv1alpha1.ScalityUIComponent{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Namespace: b.namespace, + }, + Spec: uiv1alpha1.ScalityUIComponentSpec{ + Image: b.image, + MountPath: b.mountPath, + }, + } + + if len(b.labels) > 0 { + component.Labels = b.labels + } + + return component +} + +func (b *ScalityUIComponentBuilder) Create(ctx context.Context, client klient.Client) error { + component := b.Build() + return client.Resources(b.namespace).Create(ctx, component) +} + +func DeleteScalityUIComponent(ctx context.Context, client klient.Client, namespace, name string) error { + component := &uiv1alpha1.ScalityUIComponent{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + return client.Resources(namespace).Delete(ctx, component) +} + +// ScalityUIBuilder builds ScalityUI resources +type ScalityUIBuilder struct { + name string + image string + productName string + labels 
map[string]string +} + +func NewScalityUIBuilder(name string) *ScalityUIBuilder { + return &ScalityUIBuilder{ + name: name, + image: "nginx:latest", + productName: "Test UI", + labels: make(map[string]string), + } +} + +func (b *ScalityUIBuilder) WithImage(image string) *ScalityUIBuilder { + b.image = image + return b +} + +func (b *ScalityUIBuilder) WithProductName(name string) *ScalityUIBuilder { + b.productName = name + return b +} + +func (b *ScalityUIBuilder) WithLabel(key, value string) *ScalityUIBuilder { + b.labels[key] = value + return b +} + +func (b *ScalityUIBuilder) Build() *uiv1alpha1.ScalityUI { + ui := &uiv1alpha1.ScalityUI{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + }, + Spec: uiv1alpha1.ScalityUISpec{ + Image: b.image, + ProductName: b.productName, + }, + } + + if len(b.labels) > 0 { + ui.Labels = b.labels + } + + return ui +} + +func (b *ScalityUIBuilder) Create(ctx context.Context, client klient.Client) error { + ui := b.Build() + return client.Resources().Create(ctx, ui) +} + +// ScalityUIComponentExposerBuilder builds ScalityUIComponentExposer resources +type ScalityUIComponentExposerBuilder struct { + name string + namespace string + scalityUI string + scalityUIComponent string + appHistoryBasePath string + labels map[string]string +} + +func NewScalityUIComponentExposerBuilder(name, namespace string) *ScalityUIComponentExposerBuilder { + return &ScalityUIComponentExposerBuilder{ + name: name, + namespace: namespace, + appHistoryBasePath: "/app", + labels: make(map[string]string), + } +} + +func (b *ScalityUIComponentExposerBuilder) WithScalityUI(name string) *ScalityUIComponentExposerBuilder { + b.scalityUI = name + return b +} + +func (b *ScalityUIComponentExposerBuilder) WithScalityUIComponent(name string) *ScalityUIComponentExposerBuilder { + b.scalityUIComponent = name + return b +} + +func (b *ScalityUIComponentExposerBuilder) WithAppHistoryBasePath(path string) *ScalityUIComponentExposerBuilder { + b.appHistoryBasePath = path + 
return b +} + +func (b *ScalityUIComponentExposerBuilder) WithLabel(key, value string) *ScalityUIComponentExposerBuilder { + b.labels[key] = value + return b +} + +func (b *ScalityUIComponentExposerBuilder) Build() *uiv1alpha1.ScalityUIComponentExposer { + exposer := &uiv1alpha1.ScalityUIComponentExposer{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Namespace: b.namespace, + }, + Spec: uiv1alpha1.ScalityUIComponentExposerSpec{ + ScalityUI: b.scalityUI, + ScalityUIComponent: b.scalityUIComponent, + AppHistoryBasePath: b.appHistoryBasePath, + }, + } + + if len(b.labels) > 0 { + exposer.Labels = b.labels + } + + return exposer +} + +func (b *ScalityUIComponentExposerBuilder) Create(ctx context.Context, client klient.Client) error { + exposer := b.Build() + return client.Resources(b.namespace).Create(ctx, exposer) +} + +func DeleteScalityUIComponentExposer(ctx context.Context, client klient.Client, namespace, name string) error { + exposer := &uiv1alpha1.ScalityUIComponentExposer{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + return client.Resources(namespace).Delete(ctx, exposer) +} diff --git a/test/e2e/framework/config.go b/test/e2e/framework/config.go new file mode 100644 index 0000000..9bb0fa4 --- /dev/null +++ b/test/e2e/framework/config.go @@ -0,0 +1,71 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
// Shared identifiers for the operator under test and its default image.
const (
	OperatorNamespace    = "ui-operator-system"
	OperatorDeployment   = "ui-operator-controller-manager"
	ControlPlaneLabel    = "control-plane"
	ControlPlaneValue    = "controller-manager"
	DefaultOperatorImage = "ui-operator:e2e"
)

// GetOperatorImage returns the operator image to test against,
// overridable via the E2E_OPERATOR_IMAGE environment variable.
func GetOperatorImage() string {
	if img := os.Getenv("E2E_OPERATOR_IMAGE"); img != "" {
		return img
	}
	return DefaultOperatorImage
}

// GetProjectRoot returns the repository root relative to the e2e test
// directory, overridable via the E2E_PROJECT_ROOT environment variable.
func GetProjectRoot() string {
	if root := os.Getenv("E2E_PROJECT_ROOT"); root != "" {
		return root
	}
	return "../.."
}

// envBool reports whether the named environment variable holds a truthy
// value: "true", "1", or "yes" (case-insensitive). Any other value,
// including unset, is false. Centralizes the parsing that the Skip*
// helpers previously each duplicated.
func envBool(name string) bool {
	switch strings.ToLower(os.Getenv(name)) {
	case "true", "1", "yes":
		return true
	}
	return false
}

// SkipBuild returns true if all local builds should be skipped.
// This affects both operator and mock server builds.
func SkipBuild() bool {
	return envBool("E2E_SKIP_BUILD")
}

// SkipOperatorBuild returns true if only the operator image build should be skipped.
// Use this when pulling a pre-built operator image from registry but still need to build mock server locally.
func SkipOperatorBuild() bool {
	return envBool("E2E_SKIP_OPERATOR_BUILD")
}

// SkipOperatorDeploy returns true if deploying the operator into the
// cluster should be skipped entirely.
func SkipOperatorDeploy() bool {
	return envBool("E2E_SKIP_OPERATOR")
}

// GetTargetArch returns the CPU architecture to build images for,
// overridable via E2E_TARGET_ARCH; defaults to the host architecture.
func GetTargetArch() string {
	if arch := os.Getenv("E2E_TARGET_ARCH"); arch != "" {
		return arch
	}
	return runtime.GOARCH
}
// randomString returns a random lowercase-hex string of length n, used to
// give curl helper pods unique names. On entropy-source failure it returns
// the constant "fallback" (which may be shorter than n — best-effort only).
func randomString(n int) string {
	// (n+1)/2 random bytes hex-encode to at least n characters; the
	// original read n bytes (twice the needed entropy) and named the
	// local "bytes", shadowing the imported bytes package used elsewhere
	// in this file.
	buf := make([]byte, (n+1)/2)
	if _, err := rand.Read(buf); err != nil {
		return "fallback"
	}
	return hex.EncodeToString(buf)[:n]
}
+ var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if !strings.Contains(stderr.String(), "AlreadyExists") { + return "", fmt.Errorf("failed to create curl pod: %w, stderr: %s", err, stderr.String()) + } + } + + if err := c.waitForCurlPodReady(ctx, namespace, podName); err != nil { + return "", err + } + + return podName, nil +} + +func (c *MockServerClient) waitForCurlPodReady(ctx context.Context, namespace, podName string) error { + args := []string{ + "wait", + "--for=condition=Ready", + "pod/" + podName, + "--namespace", namespace, + "--timeout=30s", + } + cmd := exec.CommandContext(ctx, "kubectl", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("curl pod not ready: %w, stderr: %s", err, stderr.String()) + } + return nil +} + +func (c *MockServerClient) execCurlCommand(ctx context.Context, fromNamespace string, curlArgs []string) (string, error) { + podName, err := c.ensureCurlPod(ctx, fromNamespace) + if err != nil { + return "", err + } + + args := []string{"exec", podName, "--namespace", fromNamespace, "--"} + args = append(args, curlArgs...) + + cmd := exec.CommandContext(ctx, "kubectl", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("curl failed: %w, stderr: %s, stdout: %s", err, stderr.String(), stdout.String()) + } + + return strings.TrimSpace(stdout.String()), nil +} + +func (c *MockServerClient) CleanupCurlPods(ctx context.Context) error { + c.mu.Lock() + podNames := make(map[string]string) + for k, v := range c.curlPodNames { + podNames[k] = v + } + c.mu.Unlock() + + var lastErr error + for namespace, podName := range podNames { + args := []string{"delete", "pod", podName, "--namespace", namespace, "--ignore-not-found"} + cmd := exec.CommandContext(ctx, "kubectl", args...) 
+ if err := cmd.Run(); err != nil { + lastErr = err + } + } + + c.mu.Lock() + c.curlPodNames = map[string]string{} + c.mu.Unlock() + + return lastErr +} + +func (c *MockServerClient) GetCounter(ctx context.Context) (int64, error) { + output, err := c.execCurl(ctx, "GET", "/_/counter", "") + if err != nil { + return 0, err + } + + var result struct { + Count int64 `json:"count"` + } + if err := json.Unmarshal([]byte(output), &result); err != nil { + return 0, fmt.Errorf("failed to parse counter response: %w", err) + } + return result.Count, nil +} + +func (c *MockServerClient) Reset(ctx context.Context) error { + _, err := c.execCurl(ctx, "POST", "/_/reset", "") + return err +} + +// TestFetch makes a test request to verify the mock server's current response +// Returns the HTTP status code and response body +func (c *MockServerClient) TestFetch(ctx context.Context) (int, string, error) { + return c.TestFetchFromNamespace(ctx, c.namespace) +} + +// TestFetchFromNamespace makes a test request from a specific namespace +// This helps diagnose cross-namespace connectivity issues +func (c *MockServerClient) TestFetchFromNamespace(ctx context.Context, fromNamespace string) (int, string, error) { + url := fmt.Sprintf("http://%s.%s.svc.cluster.local:%d/.well-known/micro-app-configuration", + c.serviceName, c.namespace, c.servicePort) + + output, err := c.execCurlCommand(ctx, fromNamespace, []string{"curl", "-s", "-w", "\n%{http_code}", url}) + if err != nil { + return 0, "", err + } + + lines := strings.Split(output, "\n") + if len(lines) < 2 { + return 0, output, fmt.Errorf("unexpected output format: %s", output) + } + + // Last line is the status code + statusCodeStr := strings.TrimSpace(lines[len(lines)-1]) + statusCode := 0 + if _, err := fmt.Sscanf(statusCodeStr, "%d", &statusCode); err != nil { + return 0, output, fmt.Errorf("failed to parse status code %q: %w", statusCodeStr, err) + } + + // Everything before the last line is the body + body := 
// extractJSON returns the first balanced top-level JSON object embedded in s,
// discarding any leading noise (e.g. curl/kubectl chatter around the payload).
// If s contains no opening brace it is returned unchanged; if the object is
// never closed, everything from the opening brace onward is returned.
func extractJSON(s string) string {
	start := strings.IndexByte(s, '{')
	if start < 0 {
		return s
	}

	var (
		depth    int  // current brace nesting, counted outside strings only
		inString bool // inside a JSON string literal
		escaped  bool // previous char was a backslash inside a string
	)
	for i := start; i < len(s); i++ {
		switch ch := s[i]; {
		case escaped:
			escaped = false
		case inString && ch == '\\':
			escaped = true
		case ch == '"':
			inString = !inString
		case inString:
			// braces inside string literals are not structural
		case ch == '{':
			depth++
		case ch == '}':
			if depth--; depth == 0 {
				return s[start : i+1]
			}
		}
	}
	return s[start:]
}
0000000..5562368 --- /dev/null +++ b/test/e2e/framework/mock_server.go @@ -0,0 +1,232 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/pkg/envconf" +) + +const ( + MockServerImage = "mock-server:e2e" + MockServerPort = 80 + MockServerServicePort = 80 +) + +type MockServer struct { + Name string + Namespace string + client klient.Client +} + +func NewMockServer(name, namespace string, client klient.Client) *MockServer { + return &MockServer{ + Name: name, + Namespace: namespace, + client: client, + } +} + +func (m *MockServer) Deploy(ctx context.Context) error { + replicas := int32(1) + labels := map[string]string{ + "app": m.Name, + "app.kubernetes.io/managed-by": "e2e-test", + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "mock-server", + Image: MockServerImage, + 
ImagePullPolicy: corev1.PullIfNotPresent, + Ports: []corev1.ContainerPort{ + { + ContainerPort: MockServerPort, + Protocol: corev1.ProtocolTCP, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(MockServerPort), + }, + }, + InitialDelaySeconds: 1, + PeriodSeconds: 2, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(MockServerPort), + }, + }, + InitialDelaySeconds: 1, + PeriodSeconds: 5, + }, + }, + }, + }, + }, + }, + } + + if err := m.client.Resources(m.Namespace).Create(ctx, deployment); err != nil { + return fmt.Errorf("failed to create mock server deployment: %w", err) + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + }, + Spec: corev1.ServiceSpec{ + Selector: labels, + Ports: []corev1.ServicePort{ + { + Port: MockServerServicePort, + TargetPort: intstr.FromInt(MockServerPort), + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } + + if err := m.client.Resources(m.Namespace).Create(ctx, service); err != nil { + return fmt.Errorf("failed to create mock server service: %w", err) + } + + return nil +} + +func (m *MockServer) WaitReady(ctx context.Context, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := m.client.Resources(m.Namespace).Get(ctx, m.Name, m.Namespace, &deployment); err != nil { + return false, nil + } + + if deployment.Status.ReadyReplicas > 0 && + deployment.Status.ReadyReplicas == *deployment.Spec.Replicas { + return true, nil + } + return false, nil + }) +} + +func (m *MockServer) Delete(ctx context.Context) error { + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + }, + } + 
if err := m.client.Resources(m.Namespace).Delete(ctx, deployment); err != nil { + return fmt.Errorf("failed to delete mock server deployment: %w", err) + } + + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.Name, + Namespace: m.Namespace, + }, + } + if err := m.client.Resources(m.Namespace).Delete(ctx, service); err != nil { + return fmt.Errorf("failed to delete mock server service: %w", err) + } + + return nil +} + +func (m *MockServer) ServiceURL() string { + return fmt.Sprintf("http://%s.%s.svc.cluster.local:%d", m.Name, m.Namespace, MockServerServicePort) +} + +func (m *MockServer) ServiceName() string { + return m.Name +} + +func BuildAndLoadMockServerImage(projectRoot, clusterName string) error { + mockServerDir := filepath.Join(projectRoot, "test", "e2e", "mock-server") + + fmt.Println("Building mock server image...") + buildCmd := exec.Command("docker", "build", "-t", MockServerImage, ".") + buildCmd.Dir = mockServerDir + buildCmd.Stdout = os.Stdout + buildCmd.Stderr = os.Stderr + if err := buildCmd.Run(); err != nil { + return fmt.Errorf("failed to build mock server image: %w", err) + } + + fmt.Println("Loading mock server image to Kind cluster...") + loadCmd := exec.Command("kind", "load", "docker-image", MockServerImage, "--name", clusterName) + loadCmd.Stdout = os.Stdout + loadCmd.Stderr = os.Stderr + if err := loadCmd.Run(); err != nil { + return fmt.Errorf("failed to load mock server image to Kind: %w", err) + } + + return nil +} + +func BuildAndLoadMockServerSetup(clusterName string) func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + if SkipBuild() { + fmt.Println("Skipping mock server build (E2E_SKIP_BUILD=true)") + return ctx, nil + } + + projectRoot, err := filepath.Abs(GetProjectRoot()) + if err != nil { + return ctx, fmt.Errorf("failed to get project root: %w", err) + } + + if err := 
BuildAndLoadMockServerImage(projectRoot, clusterName); err != nil { + return ctx, err + } + + return ctx, nil + } +} diff --git a/test/e2e/framework/operator.go b/test/e2e/framework/operator.go new file mode 100644 index 0000000..805aa32 --- /dev/null +++ b/test/e2e/framework/operator.go @@ -0,0 +1,241 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/pkg/envconf" +) + +func BuildOperatorImage(projectRoot, image string) error { + arch := GetTargetArch() + fmt.Printf("Building operator binary for linux/%s...\n", arch) + + buildCmd := exec.Command("go", "build", "-o", "bin/manager", "cmd/main.go") + buildCmd.Dir = projectRoot + buildCmd.Stdout = os.Stdout + buildCmd.Stderr = os.Stderr + buildCmd.Env = append(os.Environ(), + "CGO_ENABLED=0", + "GOOS=linux", + fmt.Sprintf("GOARCH=%s", arch), + ) + if err := buildCmd.Run(); err != nil { + return fmt.Errorf("failed to build binary: %w", err) + } + + fmt.Println("Building Docker image with pre-built binary...") + dockerCmd := exec.Command("docker", "build", + "-f", "test/e2e/Dockerfile.e2e", + "-t", image, + "bin", + ) + dockerCmd.Dir = projectRoot + dockerCmd.Stdout = os.Stdout + 
dockerCmd.Stderr = os.Stderr + return dockerCmd.Run() +} + +func LoadImageToKind(clusterName, image string) error { + cmd := exec.Command("kind", "load", "docker-image", image, "--name", clusterName) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func DeployOperator(projectRoot, image string) error { + cmd := exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", image)) + cmd.Dir = projectRoot + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func UndeployOperator(projectRoot string) error { + cmd := exec.Command("make", "undeploy", "ignore-not-found=true") + cmd.Dir = projectRoot + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func WaitForDeploymentReady(ctx context.Context, client klient.Client, namespace, name string, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, name, namespace, &deployment); err != nil { + return false, nil + } + + if deployment.Spec.Replicas == nil { + return false, nil + } + + if deployment.Status.ReadyReplicas == *deployment.Spec.Replicas && + deployment.Status.UpdatedReplicas == *deployment.Spec.Replicas && + deployment.Status.AvailableReplicas == *deployment.Spec.Replicas { + return true, nil + } + return false, nil + }) +} + +func WaitForPodRunning(ctx context.Context, client klient.Client, namespace string, labelKey, labelValue string, timeout time.Duration) (*corev1.Pod, error) { + var foundPod *corev1.Pod + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var pods corev1.PodList + if err := client.Resources(namespace).List(ctx, &pods); err != nil { + return false, nil + } + + for i := range pods.Items { + pod := &pods.Items[i] + if val, ok := pod.Labels[labelKey]; ok && val == labelValue { + if 
pod.Status.Phase == corev1.PodRunning { + allReady := true + for _, cs := range pod.Status.ContainerStatuses { + if !cs.Ready { + allReady = false + break + } + } + if allReady { + foundPod = pod + return true, nil + } + } + } + } + return false, nil + }) + + return foundPod, err +} + +func GetPodLogs(ctx context.Context, client klient.Client, namespace, podName, containerName string) (string, error) { + config := client.RESTConfig() + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return "", err + } + + req := clientset.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{ + Container: containerName, + }) + + podLogs, err := req.Stream(ctx) + if err != nil { + return "", err + } + defer podLogs.Close() + + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(podLogs) + if err != nil { + return "", err + } + + return buf.String(), nil +} + +// Ensure rest package is used +var _ rest.Config + +type clusterNameKey struct{} + +func SetClusterName(ctx context.Context, name string) context.Context { + return context.WithValue(ctx, clusterNameKey{}, name) +} + +func GetClusterName(ctx context.Context) string { + if name, ok := ctx.Value(clusterNameKey{}).(string); ok { + return name + } + return "" +} + +func DeployOperatorSetup(clusterName string) func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + if SkipOperatorDeploy() { + fmt.Println("Skipping operator deployment (E2E_SKIP_OPERATOR=true)") + return SetClusterName(ctx, clusterName), nil + } + + projectRoot, err := filepath.Abs(GetProjectRoot()) + if err != nil { + return ctx, fmt.Errorf("failed to get project root: %w", err) + } + + image := GetOperatorImage() + + if SkipBuild() || SkipOperatorBuild() { + fmt.Printf("Skipping operator image build, using existing image: %s\n", image) + } else { + fmt.Printf("Building operator image: %s\n", image) + if err := 
BuildOperatorImage(projectRoot, image); err != nil { + return ctx, fmt.Errorf("failed to build operator image: %w", err) + } + } + + fmt.Printf("Loading image to Kind cluster: %s\n", clusterName) + if err := LoadImageToKind(clusterName, image); err != nil { + return ctx, fmt.Errorf("failed to load image to Kind: %w", err) + } + + fmt.Printf("Deploying operator with image: %s\n", image) + if err := DeployOperator(projectRoot, image); err != nil { + return ctx, fmt.Errorf("failed to deploy operator: %w", err) + } + + ctx = SetClusterName(ctx, clusterName) + return ctx, nil + } +} + +func UndeployOperatorTeardown() func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + if SkipOperatorDeploy() { + fmt.Println("Skipping operator undeploy (E2E_SKIP_OPERATOR=true)") + return ctx, nil + } + + projectRoot, err := filepath.Abs(GetProjectRoot()) + if err != nil { + return ctx, fmt.Errorf("failed to get project root: %w", err) + } + + fmt.Println("Undeploying operator...") + if err := UndeployOperator(projectRoot); err != nil { + fmt.Printf("Warning: failed to undeploy operator: %v\n", err) + } + + return ctx, nil + } +} diff --git a/test/e2e/framework/resources.go b/test/e2e/framework/resources.go new file mode 100644 index 0000000..a4d8d97 --- /dev/null +++ b/test/e2e/framework/resources.go @@ -0,0 +1,84 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "text/template" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "sigs.k8s.io/e2e-framework/klient" +) + +func LoadYAMLWithTemplate(path string, values map[string]string) ([]byte, error) { + content, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read file %s: %w", path, err) + } + + tmpl, err := template.New(filepath.Base(path)).Parse(string(content)) + if err != nil { + return nil, fmt.Errorf("failed to parse template %s: %w", path, err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, values); err != nil { + return nil, fmt.Errorf("failed to execute template %s: %w", path, err) + } + + return buf.Bytes(), nil +} + +func LoadAndApplyYAML(ctx context.Context, client klient.Client, path string, namespace string) error { + values := map[string]string{ + "namespace": namespace, + } + + content, err := LoadYAMLWithTemplate(path, values) + if err != nil { + return err + } + + dec := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + obj := &unstructured.Unstructured{} + _, _, err = dec.Decode(content, nil, obj) + if err != nil { + return fmt.Errorf("failed to decode YAML %s: %w", path, err) + } + + if obj.GetNamespace() == "" && namespace != "" { + if obj.GetKind() != "ScalityUI" { + obj.SetNamespace(namespace) + } + } + + if err := client.Resources(obj.GetNamespace()).Create(ctx, obj); err != nil { + return fmt.Errorf("failed to create resource from %s: %w", path, err) + } + + return nil +} + +func GetTestDataPath(subpath string) string { + projectRoot := GetProjectRoot() + return filepath.Join(projectRoot, "test", "e2e", "testdata", subpath) +} diff --git a/test/e2e/framework/scheme.go b/test/e2e/framework/scheme.go new file mode 100644 index 0000000..3cb2042 --- /dev/null +++ b/test/e2e/framework/scheme.go @@ -0,0 +1,46 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1" +) + +func AddToScheme(scheme *runtime.Scheme) error { + if err := apiextensionsv1.AddToScheme(scheme); err != nil { + return err + } + if err := uiv1alpha1.AddToScheme(scheme); err != nil { + return err + } + return nil +} + +func SetupScheme() func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + if err := AddToScheme(cfg.Client().Resources().GetScheme()); err != nil { + return ctx, err + } + return ctx, nil + } +} diff --git a/test/e2e/framework/wait.go b/test/e2e/framework/wait.go new file mode 100644 index 0000000..f66640b --- /dev/null +++ b/test/e2e/framework/wait.go @@ -0,0 +1,908 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/e2e-framework/klient" +) + +const ( + DefaultPollInterval = time.Second + DefaultTimeout = 30 * time.Second + LongTimeout = 1 * time.Minute +) + +const ( + ConditionConfigurationRetrieved = "ConfigurationRetrieved" + ConditionDependenciesReady = "DependenciesReady" + ConditionConfigMapReady = "ConfigMapReady" + ConditionDeploymentReady = "DeploymentReady" + + RuntimeConfigMapSuffix = "-runtime-app-configuration" + ConfigVolumePrefix = "config-volume-" +) + +func WaitForCRDEstablished(ctx context.Context, client klient.Client, crdName string) error { + return wait.PollUntilContextTimeout(ctx, DefaultPollInterval, DefaultTimeout, true, func(ctx context.Context) (bool, error) { + var crd apiextensionsv1.CustomResourceDefinition + if err := client.Resources().Get(ctx, crdName, "", &crd); err != nil { + return false, nil + } + + for _, cond := range crd.Status.Conditions { + if cond.Type == apiextensionsv1.Established && cond.Status == apiextensionsv1.ConditionTrue { + return true, nil + } + } + return false, nil + }) +} + +func WaitForScalityUIReady(ctx context.Context, client klient.Client, name string, timeout time.Duration) error { + var lastPhase string + var lastConditions string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var ui uiv1alpha1.ScalityUI + if err := client.Resources().Get(ctx, name, "", &ui); err != nil { + return false, nil + } + + 
lastPhase = ui.Status.Phase + lastConditions = formatConditions(ui.Status.Conditions) + + if ui.Status.Phase == uiv1alpha1.PhaseReady { + return true, nil + } + + for _, cond := range ui.Status.Conditions { + if cond.Type == uiv1alpha1.ConditionTypeReady && cond.Status == metav1.ConditionTrue { + return true, nil + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("ScalityUI %s not ready (phase=%s, conditions=%s): %w", name, lastPhase, lastConditions, err) + } + return nil +} + +func WaitForScalityUIComponentConfigured(ctx context.Context, client klient.Client, namespace, name string, timeout time.Duration) error { + var lastConditions string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var component uiv1alpha1.ScalityUIComponent + if err := client.Resources(namespace).Get(ctx, name, namespace, &component); err != nil { + return false, nil + } + + lastConditions = formatConditions(component.Status.Conditions) + + for _, cond := range component.Status.Conditions { + if cond.Type == ConditionConfigurationRetrieved && cond.Status == metav1.ConditionTrue { + return true, nil + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("ScalityUIComponent %s/%s not configured (conditions=%s): %w", namespace, name, lastConditions, err) + } + return nil +} + +func WaitForScalityUIComponentExposerReady(ctx context.Context, client klient.Client, namespace, name string, timeout time.Duration) error { + var lastConditions string + var missingConditions []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var exposer uiv1alpha1.ScalityUIComponentExposer + if err := client.Resources(namespace).Get(ctx, name, namespace, &exposer); err != nil { + return false, nil + } + + lastConditions = formatConditions(exposer.Status.Conditions) + + requiredConditions := map[string]bool{ + 
ConditionDependenciesReady: false, + ConditionConfigMapReady: false, + ConditionDeploymentReady: false, + } + + for _, cond := range exposer.Status.Conditions { + if _, exists := requiredConditions[cond.Type]; exists { + if cond.Status == metav1.ConditionTrue { + requiredConditions[cond.Type] = true + } + } + } + + missingConditions = nil + for condType, ready := range requiredConditions { + if !ready { + missingConditions = append(missingConditions, condType) + } + } + + return len(missingConditions) == 0, nil + }) + + if err != nil { + return fmt.Errorf("ScalityUIComponentExposer %s/%s not ready (missing=%v, conditions=%s): %w", + namespace, name, missingConditions, lastConditions, err) + } + return nil +} + +func WaitForServiceExists(ctx context.Context, client klient.Client, namespace, name string, timeout time.Duration) error { + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var service corev1.Service + if err := client.Resources(namespace).Get(ctx, name, namespace, &service); err != nil { + return false, nil + } + return true, nil + }) + + if err != nil { + return fmt.Errorf("Service %s/%s not found: %w", namespace, name, err) + } + return nil +} + +func WaitForConfigMapExists(ctx context.Context, client klient.Client, namespace, name string, timeout time.Duration) error { + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var cm corev1.ConfigMap + if err := client.Resources(namespace).Get(ctx, name, namespace, &cm); err != nil { + return false, nil + } + return true, nil + }) + + if err != nil { + return fmt.Errorf("ConfigMap %s/%s not found: %w", namespace, name, err) + } + return nil +} + +func WaitForDeploymentHasVolume(ctx context.Context, client klient.Client, namespace, name, volumeName string, timeout time.Duration) error { + var foundVolumes []string + + err := wait.PollUntilContextTimeout(ctx, 
DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, name, namespace, &deployment); err != nil { + return false, nil + } + + foundVolumes = nil + for _, vol := range deployment.Spec.Template.Spec.Volumes { + foundVolumes = append(foundVolumes, vol.Name) + if vol.Name == volumeName { + return true, nil + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("Deployment %s/%s missing volume %s (found=%v): %w", namespace, name, volumeName, foundVolumes, err) + } + return nil +} + +func WaitForNamespaceDeleted(ctx context.Context, client klient.Client, name string, timeout time.Duration) error { + var lastPhase corev1.NamespacePhase + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var ns corev1.Namespace + if err := client.Resources().Get(ctx, name, "", &ns); err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, err + } + lastPhase = ns.Status.Phase + return false, nil + }) + + if err != nil { + return fmt.Errorf("Namespace %s not deleted (phase=%s): %w", name, lastPhase, err) + } + return nil +} + +func GetScalityUIComponent(ctx context.Context, client klient.Client, namespace, name string) (*uiv1alpha1.ScalityUIComponent, error) { + var component uiv1alpha1.ScalityUIComponent + if err := client.Resources(namespace).Get(ctx, name, namespace, &component); err != nil { + return nil, fmt.Errorf("failed to get ScalityUIComponent %s/%s: %w", namespace, name, err) + } + return &component, nil +} + +func DeleteScalityUI(ctx context.Context, client klient.Client, name string) error { + ui := &uiv1alpha1.ScalityUI{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + return client.Resources().Delete(ctx, ui) +} + +func formatConditions(conditions []metav1.Condition) string { + if len(conditions) == 0 { + return "none" + } + var parts []string 
+ for _, c := range conditions { + parts = append(parts, fmt.Sprintf("%s=%s", c.Type, c.Status)) + } + return strings.Join(parts, ",") +} + +func WaitForScalityUIComponentCondition(ctx context.Context, client klient.Client, + namespace, name string, conditionType string, expectedStatus metav1.ConditionStatus, + timeout time.Duration) error { + + var lastConditions string + var lastReason string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var component uiv1alpha1.ScalityUIComponent + if err := client.Resources(namespace).Get(ctx, name, namespace, &component); err != nil { + return false, nil + } + + lastConditions = formatConditions(component.Status.Conditions) + + for _, cond := range component.Status.Conditions { + if cond.Type == conditionType { + lastReason = cond.Reason + if cond.Status == expectedStatus { + return true, nil + } + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("ScalityUIComponent %s/%s condition %s not %s (reason=%s, conditions=%s): %w", + namespace, name, conditionType, expectedStatus, lastReason, lastConditions, err) + } + return nil +} + +func WaitForScalityUIComponentAnnotationAbsent(ctx context.Context, client klient.Client, + namespace, name, annotationKey string, timeout time.Duration) error { + + var lastAnnotations map[string]string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var component uiv1alpha1.ScalityUIComponent + if err := client.Resources(namespace).Get(ctx, name, namespace, &component); err != nil { + return false, nil + } + + lastAnnotations = component.Annotations + if component.Annotations == nil { + return true, nil + } + + _, exists := component.Annotations[annotationKey] + return !exists, nil + }) + + if err != nil { + return fmt.Errorf("ScalityUIComponent %s/%s annotation %s still present (annotations=%v): %w", + namespace, name, 
annotationKey, lastAnnotations, err) + } + return nil +} + +func WaitForMockServerCounter(ctx context.Context, mockClient *MockServerClient, + expected int64, timeout time.Duration) error { + + var lastCounter int64 + var lastErr error + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + counter, err := mockClient.GetCounter(ctx) + if err != nil { + lastErr = err + return false, nil + } + lastErr = nil + lastCounter = counter + return counter >= expected, nil + }) + + if err != nil { + if lastErr != nil { + return fmt.Errorf("mock server counter did not reach %d (last=%d, lastErr=%v): %w", expected, lastCounter, lastErr, err) + } + return fmt.Errorf("mock server counter did not reach %d (last=%d): %w", expected, lastCounter, err) + } + return nil +} + +// GetDeploymentReplicaSets returns all ReplicaSets owned by a Deployment +func GetDeploymentReplicaSets(ctx context.Context, client klient.Client, namespace, deploymentName string) ([]appsv1.ReplicaSet, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, deploymentName, namespace, &deployment); err != nil { + return nil, fmt.Errorf("failed to get deployment %s/%s: %w", namespace, deploymentName, err) + } + + var rsList appsv1.ReplicaSetList + if err := client.Resources(namespace).List(ctx, &rsList); err != nil { + return nil, fmt.Errorf("failed to list replicasets in %s: %w", namespace, err) + } + + var owned []appsv1.ReplicaSet + for _, rs := range rsList.Items { + for _, ref := range rs.OwnerReferences { + if ref.Kind == "Deployment" && ref.Name == deploymentName { + owned = append(owned, rs) + break + } + } + } + return owned, nil +} + +// GetActiveReplicaSet returns the only active ReplicaSet (replicas > 0) for a Deployment +// Returns error if there are multiple active ReplicaSets (rolling update in progress) +func GetActiveReplicaSet(ctx context.Context, client klient.Client, namespace, 
deploymentName string) (*appsv1.ReplicaSet, error) { + rsList, err := GetDeploymentReplicaSets(ctx, client, namespace, deploymentName) + if err != nil { + return nil, err + } + + var activeRS []appsv1.ReplicaSet + for _, rs := range rsList { + if rs.Status.Replicas > 0 { + activeRS = append(activeRS, rs) + } + } + + if len(activeRS) == 0 { + return nil, fmt.Errorf("no active ReplicaSet found for %s/%s", namespace, deploymentName) + } + if len(activeRS) > 1 { + var names []string + for _, rs := range activeRS { + names = append(names, fmt.Sprintf("%s(replicas=%d)", rs.Name, rs.Status.Replicas)) + } + return nil, fmt.Errorf("multiple active ReplicaSets for %s/%s: %v (rolling update in progress)", namespace, deploymentName, names) + } + + return &activeRS[0], nil +} + +// WaitForDeploymentStable waits for a Deployment to have exactly one active ReplicaSet +func WaitForDeploymentStable(ctx context.Context, client klient.Client, namespace, deploymentName string, timeout time.Duration) (*appsv1.ReplicaSet, error) { + var activeRS *appsv1.ReplicaSet + var lastErr error + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + rs, err := GetActiveReplicaSet(ctx, client, namespace, deploymentName) + if err != nil { + lastErr = err + return false, nil + } + activeRS = rs + lastErr = nil + return true, nil + }) + + if err != nil { + if lastErr != nil { + return nil, fmt.Errorf("deployment %s/%s not stable: %v: %w", namespace, deploymentName, lastErr, err) + } + return nil, fmt.Errorf("deployment %s/%s not stable: %w", namespace, deploymentName, err) + } + return activeRS, nil +} + +// WaitForNewReplicaSet waits for a new ReplicaSet to be created for a Deployment +func WaitForNewReplicaSet(ctx context.Context, client klient.Client, + namespace, deploymentName string, excludeNames []string, timeout time.Duration) (string, error) { + + excludeSet := make(map[string]bool) + for _, name := range excludeNames { + 
excludeSet[name] = true + } + + var newRSName string + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + rsList, err := GetDeploymentReplicaSets(ctx, client, namespace, deploymentName) + if err != nil { + return false, nil + } + + for _, rs := range rsList { + // Only consider active ReplicaSets (replicas > 0) to avoid returning old scaled-down RS + if !excludeSet[rs.Name] && rs.Status.Replicas > 0 { + newRSName = rs.Name + return true, nil + } + } + return false, nil + }) + + if err != nil { + return "", fmt.Errorf("new ReplicaSet not created for %s/%s (excluded=%v): %w", + namespace, deploymentName, excludeNames, err) + } + return newRSName, nil +} + +// WaitForReplicaSetScaledDown waits for a ReplicaSet to have 0 replicas +func WaitForReplicaSetScaledDown(ctx context.Context, client klient.Client, + namespace, rsName string, timeout time.Duration) error { + + var lastReplicas int32 + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var rs appsv1.ReplicaSet + if err := client.Resources(namespace).Get(ctx, rsName, namespace, &rs); err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, nil + } + + lastReplicas = rs.Status.Replicas + return rs.Status.Replicas == 0, nil + }) + + if err != nil { + return fmt.Errorf("ReplicaSet %s/%s not scaled down (replicas=%d): %w", + namespace, rsName, lastReplicas, err) + } + return nil +} + +// ResourceRef identifies a Kubernetes resource for version tracking +type ResourceRef struct { + Kind string + Namespace string + Name string +} + +// GetResourceVersion gets the ResourceVersion of a single resource +func GetResourceVersion(ctx context.Context, client klient.Client, ref ResourceRef) (string, error) { + switch ref.Kind { + case "Deployment": + var obj appsv1.Deployment + if err := client.Resources(ref.Namespace).Get(ctx, ref.Name, ref.Namespace, &obj); 
err != nil { + return "", err + } + return obj.ResourceVersion, nil + case "Service": + var obj corev1.Service + if err := client.Resources(ref.Namespace).Get(ctx, ref.Name, ref.Namespace, &obj); err != nil { + return "", err + } + return obj.ResourceVersion, nil + case "ConfigMap": + var obj corev1.ConfigMap + if err := client.Resources(ref.Namespace).Get(ctx, ref.Name, ref.Namespace, &obj); err != nil { + return "", err + } + return obj.ResourceVersion, nil + default: + return "", fmt.Errorf("unsupported resource kind: %s", ref.Kind) + } +} + +// GetResourceVersions gets ResourceVersions for multiple resources +func GetResourceVersions(ctx context.Context, client klient.Client, refs []ResourceRef) (map[string]string, error) { + result := make(map[string]string) + for _, ref := range refs { + key := fmt.Sprintf("%s/%s/%s", ref.Kind, ref.Namespace, ref.Name) + version, err := GetResourceVersion(ctx, client, ref) + if err != nil { + return nil, fmt.Errorf("failed to get version for %s: %w", key, err) + } + result[key] = version + } + return result, nil +} + +// DeleteOperatorPod deletes the operator pod and waits for a new one to be running +func DeleteOperatorPod(ctx context.Context, client klient.Client, timeout time.Duration) error { + var pods corev1.PodList + if err := client.Resources(OperatorNamespace).List(ctx, &pods); err != nil { + return fmt.Errorf("failed to list pods in %s: %w", OperatorNamespace, err) + } + + var operatorPod *corev1.Pod + for i := range pods.Items { + pod := &pods.Items[i] + if val, ok := pod.Labels[ControlPlaneLabel]; ok && val == ControlPlaneValue { + operatorPod = pod + break + } + } + + if operatorPod == nil { + return fmt.Errorf("operator pod not found in %s", OperatorNamespace) + } + + oldPodName := operatorPod.Name + + if err := client.Resources(OperatorNamespace).Delete(ctx, operatorPod); err != nil { + return fmt.Errorf("failed to delete operator pod %s: %w", oldPodName, err) + } + + err := wait.PollUntilContextTimeout(ctx, 
DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var pods corev1.PodList + if err := client.Resources(OperatorNamespace).List(ctx, &pods); err != nil { + return false, nil + } + + for i := range pods.Items { + pod := &pods.Items[i] + if val, ok := pod.Labels[ControlPlaneLabel]; ok && val == ControlPlaneValue { + if pod.Name != oldPodName && pod.Status.Phase == corev1.PodRunning { + allReady := true + for _, cs := range pod.Status.ContainerStatuses { + if !cs.Ready { + allReady = false + break + } + } + if allReady { + return true, nil + } + } + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("new operator pod not running after deleting %s: %w", oldPodName, err) + } + return nil +} + +// GetDeploymentPodTemplateHash gets the hash annotation from a Deployment's pod template +func GetDeploymentPodTemplateHash(ctx context.Context, client klient.Client, + namespace, name, annotationKey string) (string, error) { + + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, name, namespace, &deployment); err != nil { + return "", fmt.Errorf("failed to get deployment %s/%s: %w", namespace, name, err) + } + + if deployment.Spec.Template.Annotations == nil { + return "", nil + } + return deployment.Spec.Template.Annotations[annotationKey], nil +} + +// WaitForDeploymentAnnotationChange waits for a Deployment's pod template annotation to change +func WaitForDeploymentAnnotationChange(ctx context.Context, client klient.Client, + namespace, name, annotationKey, oldValue string, timeout time.Duration) (string, error) { + + var newValue string + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, name, namespace, &deployment); err != nil { + return false, nil + } + + if deployment.Spec.Template.Annotations == nil { + return false, nil + } + + newValue = 
deployment.Spec.Template.Annotations[annotationKey] + return newValue != "" && newValue != oldValue, nil + }) + + if err != nil { + return "", fmt.Errorf("deployment %s/%s annotation %s did not change from %s: %w", + namespace, name, annotationKey, oldValue, err) + } + return newValue, nil +} + +// WaitForDeploymentNoVolume waits for a Deployment to NOT have the specified volume +func WaitForDeploymentNoVolume(ctx context.Context, client klient.Client, + namespace, name, volumeName string, timeout time.Duration) error { + + var foundVolumes []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, name, namespace, &deployment); err != nil { + return false, nil + } + + foundVolumes = nil + for _, vol := range deployment.Spec.Template.Spec.Volumes { + foundVolumes = append(foundVolumes, vol.Name) + if vol.Name == volumeName { + return false, nil + } + } + return true, nil + }) + + if err != nil { + return fmt.Errorf("Deployment %s/%s still has volume %s (found=%v): %w", + namespace, name, volumeName, foundVolumes, err) + } + return nil +} + +// WaitForConfigMapDeleted waits for a ConfigMap to be deleted +func WaitForConfigMapDeleted(ctx context.Context, client klient.Client, + namespace, name string, timeout time.Duration) error { + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var cm corev1.ConfigMap + if err := client.Resources(namespace).Get(ctx, name, namespace, &cm); err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, nil + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("ConfigMap %s/%s was not deleted: %w", namespace, name, err) + } + return nil +} + +// GetConfigMapFinalizers gets the finalizers from a ConfigMap +func GetConfigMapFinalizers(ctx context.Context, client 
klient.Client, + namespace, name string) ([]string, error) { + + var cm corev1.ConfigMap + if err := client.Resources(namespace).Get(ctx, name, namespace, &cm); err != nil { + return nil, fmt.Errorf("failed to get ConfigMap %s/%s: %w", namespace, name, err) + } + return cm.Finalizers, nil +} + +// WaitForConfigMapHasFinalizer waits for a ConfigMap to have a specific finalizer +func WaitForConfigMapHasFinalizer(ctx context.Context, client klient.Client, + namespace, name, finalizer string, timeout time.Duration) error { + + var lastFinalizers []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var cm corev1.ConfigMap + if err := client.Resources(namespace).Get(ctx, name, namespace, &cm); err != nil { + return false, nil + } + + lastFinalizers = cm.Finalizers + for _, f := range cm.Finalizers { + if f == finalizer { + return true, nil + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("ConfigMap %s/%s does not have finalizer %s (found=%v): %w", + namespace, name, finalizer, lastFinalizers, err) + } + return nil +} + +// WaitForConfigMapNoFinalizer waits for a ConfigMap to NOT have a specific finalizer +func WaitForConfigMapNoFinalizer(ctx context.Context, client klient.Client, + namespace, name, finalizer string, timeout time.Duration) error { + + var lastFinalizers []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var cm corev1.ConfigMap + if err := client.Resources(namespace).Get(ctx, name, namespace, &cm); err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, nil + } + + lastFinalizers = cm.Finalizers + for _, f := range cm.Finalizers { + if f == finalizer { + return false, nil + } + } + return true, nil + }) + + if err != nil { + return fmt.Errorf("ConfigMap %s/%s still has finalizer %s (found=%v): %w", + namespace, name, finalizer, lastFinalizers, 
err) + } + return nil +} + +// WaitForDeploymentHasVolumeMount waits for a Deployment container to have a specific volume mount +func WaitForDeploymentHasVolumeMount(ctx context.Context, client klient.Client, + namespace, deploymentName, volumeName, expectedMountPath string, timeout time.Duration) error { + + var foundMounts []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + var deployment appsv1.Deployment + if err := client.Resources(namespace).Get(ctx, deploymentName, namespace, &deployment); err != nil { + return false, nil + } + + foundMounts = nil + for _, container := range deployment.Spec.Template.Spec.Containers { + for _, mount := range container.VolumeMounts { + foundMounts = append(foundMounts, fmt.Sprintf("%s->%s", mount.Name, mount.MountPath)) + if mount.Name == volumeName && mount.MountPath == expectedMountPath { + return true, nil + } + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("Deployment %s/%s missing volume mount %s at %s (found=%v): %w", + namespace, deploymentName, volumeName, expectedMountPath, foundMounts, err) + } + return nil +} + +type DeployedApp struct { + AppHistoryBasePath string `json:"appHistoryBasePath"` + Kind string `json:"kind"` + Name string `json:"name"` + URL string `json:"url"` + Version string `json:"version"` +} + +const deployedUIAppsKey = "deployed-ui-apps.json" + +func GetDeployedApps(ctx context.Context, client klient.Client, scalityUIName string) ([]DeployedApp, error) { + configMapName := scalityUIName + "-deployed-ui-apps" + + var cm corev1.ConfigMap + if err := client.Resources(OperatorNamespace).Get(ctx, configMapName, OperatorNamespace, &cm); err != nil { + return nil, fmt.Errorf("failed to get deployed-ui-apps ConfigMap %s/%s: %w", + OperatorNamespace, configMapName, err) + } + + jsonData, ok := cm.Data[deployedUIAppsKey] + if !ok { + return nil, fmt.Errorf("ConfigMap %s/%s missing key %s", + 
OperatorNamespace, configMapName, deployedUIAppsKey) + } + + var apps []DeployedApp + if err := json.Unmarshal([]byte(jsonData), &apps); err != nil { + return nil, fmt.Errorf("failed to parse deployed-ui-apps JSON: %w", err) + } + + return apps, nil +} + +func WaitForDeployedAppsContains(ctx context.Context, client klient.Client, + scalityUIName, componentName string, timeout time.Duration) error { + + var lastApps []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + apps, err := GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + return false, nil + } + + lastApps = nil + for _, app := range apps { + lastApps = append(lastApps, app.Name) + if app.Name == componentName { + return true, nil + } + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("deployed-apps for %s does not contain %s (found=%v): %w", + scalityUIName, componentName, lastApps, err) + } + return nil +} + +func WaitForDeployedAppsNotContains(ctx context.Context, client klient.Client, + scalityUIName, componentName string, timeout time.Duration) error { + + var lastApps []string + + err := wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + apps, err := GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, nil + } + + lastApps = nil + for _, app := range apps { + lastApps = append(lastApps, app.Name) + if app.Name == componentName { + return false, nil + } + } + return true, nil + }) + + if err != nil { + return fmt.Errorf("deployed-apps for %s still contains %s (apps=%v): %w", + scalityUIName, componentName, lastApps, err) + } + return nil +} + +func WaitForDeployedAppsCount(ctx context.Context, client klient.Client, + scalityUIName string, expectedCount int, timeout time.Duration) error { + + var lastCount int + var lastApps []string + + err := 
wait.PollUntilContextTimeout(ctx, DefaultPollInterval, timeout, true, func(ctx context.Context) (bool, error) { + apps, err := GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + return false, nil + } + + lastCount = len(apps) + lastApps = nil + for _, app := range apps { + lastApps = append(lastApps, app.Name) + } + return lastCount == expectedCount, nil + }) + + if err != nil { + return fmt.Errorf("deployed-apps for %s count mismatch: expected %d, got %d (apps=%v): %w", + scalityUIName, expectedCount, lastCount, lastApps, err) + } + return nil +} diff --git a/test/e2e/mock-server/Dockerfile b/test/e2e/mock-server/Dockerfile new file mode 100644 index 0000000..38dd515 --- /dev/null +++ b/test/e2e/mock-server/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.24-alpine AS builder + +WORKDIR /app +COPY main.go . +RUN CGO_ENABLED=0 go build -o mock-server main.go + +FROM alpine:3.21 + +COPY --from=builder /app/mock-server /mock-server +EXPOSE 80 + +CMD ["/mock-server"] diff --git a/test/e2e/mock-server/main.go b/test/e2e/mock-server/main.go new file mode 100644 index 0000000..bd67b14 --- /dev/null +++ b/test/e2e/mock-server/main.go @@ -0,0 +1,163 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "sync" + "sync/atomic" + "time" +) + +type MockConfig struct { + Delay time.Duration + StatusCode int + Response string +} + +type Server struct { + mu sync.RWMutex + config MockConfig + counter atomic.Int64 +} + +func NewServer() *Server { + return &Server{ + config: MockConfig{ + Delay: 0, + StatusCode: http.StatusOK, + Response: defaultMicroAppConfig(), + }, + } +} + +func defaultMicroAppConfig() string { + return `{ + "kind": "MicroAppRuntimeConfiguration", + "apiVersion": "ui.scality.com/v1alpha1", + "metadata": { + "kind": "shell", + "name": "mock-component" + }, + "spec": { + "version": "1.0.0", + "publicPath": "/mock/", + "module": "./MicroApp", + "views": { + "main": { + "path": "/mock", + "label": {"en": "Mock"} + } + } + } +}` 
+} + +func (s *Server) handleMicroAppConfig(w http.ResponseWriter, r *http.Request) { + s.counter.Add(1) + log.Printf("Request #%d: %s %s", s.counter.Load(), r.Method, r.URL.Path) + + s.mu.RLock() + config := s.config + s.mu.RUnlock() + + if config.Delay > 0 { + time.Sleep(config.Delay) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(config.StatusCode) + w.Write([]byte(config.Response)) +} + +func (s *Server) handleCounter(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + count := s.counter.Load() + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]int64{"count": count}) +} + +func (s *Server) handleReset(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + s.counter.Store(0) + s.mu.Lock() + s.config = MockConfig{ + Delay: 0, + StatusCode: http.StatusOK, + Response: defaultMicroAppConfig(), + } + s.mu.Unlock() + + log.Println("Server state reset") + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status": "reset"}`)) +} + +func (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + var req struct { + Delay int `json:"delay"` + StatusCode int `json:"statusCode"` + Response string `json:"response"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + s.mu.Lock() + if req.Delay > 0 { + s.config.Delay = time.Duration(req.Delay) * time.Millisecond + } + if req.StatusCode > 0 { + s.config.StatusCode = req.StatusCode + } + if req.Response != "" { + s.config.Response = req.Response + } + s.mu.Unlock() + + log.Printf("Config updated: delay=%dms, statusCode=%d", 
req.Delay, req.StatusCode) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status": "updated"}`)) +} + +func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status": "healthy"}`)) +} + +func main() { + port := os.Getenv("PORT") + if port == "" { + port = "80" + } + + server := NewServer() + + http.HandleFunc("/.well-known/micro-app-configuration", server.handleMicroAppConfig) + http.HandleFunc("/_/counter", server.handleCounter) + http.HandleFunc("/_/reset", server.handleReset) + http.HandleFunc("/_/config", server.handleConfig) + http.HandleFunc("/healthz", server.handleHealth) + + addr := fmt.Sprintf(":%s", port) + log.Printf("Mock server starting on %s", addr) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("Server failed: %v", err) + } +} diff --git a/test/e2e/multi_namespace_test.go b/test/e2e/multi_namespace_test.go new file mode 100644 index 0000000..b3364a3 --- /dev/null +++ b/test/e2e/multi_namespace_test.go @@ -0,0 +1,572 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "testing" + + "github.com/scality/ui-operator/test/e2e/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +type multiNamespaceContextKey string + +const ( + multiNamespaceNSAKey multiNamespaceContextKey = "multi-ns-a" + multiNamespaceNSBKey multiNamespaceContextKey = "multi-ns-b" + multiNamespaceScalityUIKey multiNamespaceContextKey = "multi-ns-scalityui" + multiNamespaceCompA1Key multiNamespaceContextKey = "multi-ns-comp-a1" + multiNamespaceCompA2Key multiNamespaceContextKey = "multi-ns-comp-a2" + multiNamespaceCompB1Key multiNamespaceContextKey = "multi-ns-comp-b1" + multiNamespaceExpA1Key multiNamespaceContextKey = "multi-ns-exp-a1" + multiNamespaceExpA2Key multiNamespaceContextKey = "multi-ns-exp-a2" + multiNamespaceExpB1Key multiNamespaceContextKey = "multi-ns-exp-b1" +) + +func TestMultiNamespace_MultipleComponentsAggregation(t *testing.T) { + feature := features.New("multi-namespace-components-aggregation"). 
+ Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + nsA := envconf.RandomName("multi-ns-a", 16) + nsB := envconf.RandomName("multi-ns-b", 16) + scalityUIName := envconf.RandomName("multi-ns-agg-ui", 24) + compA1Name := envconf.RandomName("comp-a1", 16) + compA2Name := envconf.RandomName("comp-a2", 16) + compB1Name := envconf.RandomName("comp-b1", 16) + expA1Name := envconf.RandomName("exp-a1", 16) + expA2Name := envconf.RandomName("exp-a2", 16) + expB1Name := envconf.RandomName("exp-b1", 16) + + for _, nsName := range []string{nsA, nsB} { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: nsName}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace %s: %v", nsName, err) + } + t.Logf("Created namespace %s", nsName) + } + + ctx = context.WithValue(ctx, multiNamespaceNSAKey, nsA) + ctx = context.WithValue(ctx, multiNamespaceNSBKey, nsB) + ctx = context.WithValue(ctx, multiNamespaceScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, multiNamespaceCompA1Key, compA1Name) + ctx = context.WithValue(ctx, multiNamespaceCompA2Key, compA2Name) + ctx = context.WithValue(ctx, multiNamespaceCompB1Key, compB1Name) + ctx = context.WithValue(ctx, multiNamespaceExpA1Key, expA1Name) + ctx = context.WithValue(ctx, multiNamespaceExpA2Key, expA2Name) + ctx = context.WithValue(ctx, multiNamespaceExpB1Key, expB1Name) + return ctx + }). + Assess("create ScalityUI", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("Multi-Namespace Aggregation Test"). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + t.Logf("Created ScalityUI %s", scalityUIName) + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + t.Logf("ScalityUI is ready") + + return ctx + }). + Assess("create 2 Components in ns-a", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + + for _, compName := range []string{compA1Name, compA2Name} { + if err := framework.NewScalityUIComponentBuilder(compName, nsA). + WithImage(framework.MockServerImage). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent %s: %v", compName, err) + } + t.Logf("Created ScalityUIComponent %s in %s", compName, nsA) + + if err := framework.WaitForDeploymentReady(ctx, client, nsA, compName, framework.LongTimeout); err != nil { + t.Fatalf("Component %s deployment not ready: %v", compName, err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, nsA, compName, framework.LongTimeout); err != nil { + t.Fatalf("Component %s not configured: %v", compName, err) + } + t.Logf("Component %s ready and configured", compName) + } + + return ctx + }). + Assess("create 1 Component in ns-b", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsB := ctx.Value(multiNamespaceNSBKey).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + + if err := framework.NewScalityUIComponentBuilder(compB1Name, nsB). + WithImage(framework.MockServerImage). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent %s: %v", compB1Name, err) + } + t.Logf("Created ScalityUIComponent %s in %s", compB1Name, nsB) + + if err := framework.WaitForDeploymentReady(ctx, client, nsB, compB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Component %s deployment not ready: %v", compB1Name, err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, nsB, compB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Component %s not configured: %v", compB1Name, err) + } + t.Logf("Component %s ready and configured", compB1Name) + + return ctx + }). + Assess("create 2 Exposers in ns-a", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + expA1Name := ctx.Value(multiNamespaceExpA1Key).(string) + expA2Name := ctx.Value(multiNamespaceExpA2Key).(string) + + exposers := []struct { + name string + component string + basePath string + }{ + {expA1Name, compA1Name, "/app-a1"}, + {expA2Name, compA2Name, "/app-a2"}, + } + + for _, exp := range exposers { + if err := framework.NewScalityUIComponentExposerBuilder(exp.name, nsA). + WithScalityUI(scalityUIName). + WithScalityUIComponent(exp.component). + WithAppHistoryBasePath(exp.basePath). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer %s: %v", exp.name, err) + } + t.Logf("Created ScalityUIComponentExposer %s", exp.name) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, nsA, exp.name, framework.LongTimeout); err != nil { + t.Fatalf("Exposer %s not ready: %v", exp.name, err) + } + t.Logf("Exposer %s is ready", exp.name) + } + + return ctx + }). 
+ Assess("create 1 Exposer in ns-b", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsB := ctx.Value(multiNamespaceNSBKey).(string) + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + expB1Name := ctx.Value(multiNamespaceExpB1Key).(string) + + if err := framework.NewScalityUIComponentExposerBuilder(expB1Name, nsB). + WithScalityUI(scalityUIName). + WithScalityUIComponent(compB1Name). + WithAppHistoryBasePath("/app-b1"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer %s: %v", expB1Name, err) + } + t.Logf("Created ScalityUIComponentExposer %s", expB1Name) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, nsB, expB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Exposer %s not ready: %v", expB1Name, err) + } + t.Logf("Exposer %s is ready", expB1Name) + + return ctx + }). + Assess("verify deployed-apps contains all 3 components", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + + if err := framework.WaitForDeployedAppsCount(ctx, client, scalityUIName, 3, framework.LongTimeout); err != nil { + t.Fatalf("Deployed-apps count mismatch: %v", err) + } + t.Logf("Verified: deployed-apps contains 3 components") + + for _, compName := range []string{compA1Name, compA2Name, compB1Name} { + if err := framework.WaitForDeployedAppsContains(ctx, client, scalityUIName, compName, framework.DefaultTimeout); err != nil { + t.Fatalf("Component %s not in deployed-apps: %v", compName, err) + } + t.Logf("Verified: Component %s is in deployed-apps", compName) + } + + apps, err := 
framework.GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + t.Fatalf("Failed to get deployed-apps: %v", err) + } + + expectedBasePaths := map[string]string{ + compA1Name: "/app-a1", + compA2Name: "/app-a2", + compB1Name: "/app-b1", + } + for _, app := range apps { + expectedPath, ok := expectedBasePaths[app.Name] + if !ok { + t.Errorf("Unexpected component in deployed-apps: %s", app.Name) + continue + } + if app.AppHistoryBasePath != expectedPath { + t.Errorf("Component %s has wrong AppHistoryBasePath: expected %s, got %s", + app.Name, expectedPath, app.AppHistoryBasePath) + } else { + t.Logf("Verified: Component %s has correct AppHistoryBasePath=%s", app.Name, app.AppHistoryBasePath) + } + } + + return ctx + }). + Assess("verify runtime ConfigMaps exist in both namespaces", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + nsB := ctx.Value(multiNamespaceNSBKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + + nsAConfigMaps := []string{ + compA1Name + framework.RuntimeConfigMapSuffix, + compA2Name + framework.RuntimeConfigMapSuffix, + } + for _, cmName := range nsAConfigMaps { + if err := framework.WaitForConfigMapExists(ctx, client, nsA, cmName, framework.DefaultTimeout); err != nil { + t.Fatalf("ConfigMap %s/%s not found: %v", nsA, cmName, err) + } + t.Logf("Verified: ConfigMap %s/%s exists", nsA, cmName) + } + + nsBConfigMap := compB1Name + framework.RuntimeConfigMapSuffix + if err := framework.WaitForConfigMapExists(ctx, client, nsB, nsBConfigMap, framework.DefaultTimeout); err != nil { + t.Fatalf("ConfigMap %s/%s not found: %v", nsB, nsBConfigMap, err) + } + t.Logf("Verified: ConfigMap %s/%s exists", nsB, nsBConfigMap) + + return ctx + }). 
+ Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + nsB := ctx.Value(multiNamespaceNSBKey).(string) + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI: %v", err) + } else { + t.Logf("Deleted ScalityUI %s", scalityUIName) + } + + for _, nsName := range []string{nsA, nsB} { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}} + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace %s: %v", nsName, err) + } else { + t.Logf("Deleted namespace %s", nsName) + } + } + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} + +func TestMultiNamespace_PartialNamespaceDeletion(t *testing.T) { + feature := features.New("multi-namespace-partial-deletion"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + + nsA := envconf.RandomName("partial-ns-a", 16) + nsB := envconf.RandomName("partial-ns-b", 16) + scalityUIName := envconf.RandomName("partial-del-ui", 24) + compA1Name := envconf.RandomName("comp-a1", 16) + compA2Name := envconf.RandomName("comp-a2", 16) + compB1Name := envconf.RandomName("comp-b1", 16) + expA1Name := envconf.RandomName("exp-a1", 16) + expA2Name := envconf.RandomName("exp-a2", 16) + expB1Name := envconf.RandomName("exp-b1", 16) + + for _, nsName := range []string{nsA, nsB} { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: nsName}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace %s: %v", nsName, err) + } + t.Logf("Created namespace %s", nsName) + } + + ctx = context.WithValue(ctx, multiNamespaceNSAKey, nsA) + ctx = context.WithValue(ctx, multiNamespaceNSBKey, nsB) + ctx = context.WithValue(ctx, 
multiNamespaceScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, multiNamespaceCompA1Key, compA1Name) + ctx = context.WithValue(ctx, multiNamespaceCompA2Key, compA2Name) + ctx = context.WithValue(ctx, multiNamespaceCompB1Key, compB1Name) + ctx = context.WithValue(ctx, multiNamespaceExpA1Key, expA1Name) + ctx = context.WithValue(ctx, multiNamespaceExpA2Key, expA2Name) + ctx = context.WithValue(ctx, multiNamespaceExpB1Key, expB1Name) + return ctx + }). + Assess("create ScalityUI", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("Multi-Namespace Partial Deletion Test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + t.Logf("Created ScalityUI %s", scalityUIName) + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + t.Logf("ScalityUI is ready") + + return ctx + }). + Assess("create Components and Exposers in both namespaces", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + nsB := ctx.Value(multiNamespaceNSBKey).(string) + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + expA1Name := ctx.Value(multiNamespaceExpA1Key).(string) + expA2Name := ctx.Value(multiNamespaceExpA2Key).(string) + expB1Name := ctx.Value(multiNamespaceExpB1Key).(string) + + nsAComponents := []string{compA1Name, compA2Name} + for _, compName := range nsAComponents { + if err := framework.NewScalityUIComponentBuilder(compName, nsA). 
+ WithImage(framework.MockServerImage). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent %s: %v", compName, err) + } + t.Logf("Created ScalityUIComponent %s in %s", compName, nsA) + + if err := framework.WaitForDeploymentReady(ctx, client, nsA, compName, framework.LongTimeout); err != nil { + t.Fatalf("Component %s deployment not ready: %v", compName, err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, nsA, compName, framework.LongTimeout); err != nil { + t.Fatalf("Component %s not configured: %v", compName, err) + } + t.Logf("Component %s ready and configured", compName) + } + + if err := framework.NewScalityUIComponentBuilder(compB1Name, nsB). + WithImage(framework.MockServerImage). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent %s: %v", compB1Name, err) + } + t.Logf("Created ScalityUIComponent %s in %s", compB1Name, nsB) + + if err := framework.WaitForDeploymentReady(ctx, client, nsB, compB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Component %s deployment not ready: %v", compB1Name, err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, nsB, compB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Component %s not configured: %v", compB1Name, err) + } + t.Logf("Component %s ready and configured", compB1Name) + + nsAExposers := []struct { + name string + component string + basePath string + }{ + {expA1Name, compA1Name, "/app-a1"}, + {expA2Name, compA2Name, "/app-a2"}, + } + + for _, exp := range nsAExposers { + if err := framework.NewScalityUIComponentExposerBuilder(exp.name, nsA). + WithScalityUI(scalityUIName). + WithScalityUIComponent(exp.component). + WithAppHistoryBasePath(exp.basePath). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer %s: %v", exp.name, err) + } + t.Logf("Created ScalityUIComponentExposer %s", exp.name) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, nsA, exp.name, framework.LongTimeout); err != nil { + t.Fatalf("Exposer %s not ready: %v", exp.name, err) + } + t.Logf("Exposer %s is ready", exp.name) + } + + if err := framework.NewScalityUIComponentExposerBuilder(expB1Name, nsB). + WithScalityUI(scalityUIName). + WithScalityUIComponent(compB1Name). + WithAppHistoryBasePath("/app-b1"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer %s: %v", expB1Name, err) + } + t.Logf("Created ScalityUIComponentExposer %s", expB1Name) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, nsB, expB1Name, framework.LongTimeout); err != nil { + t.Fatalf("Exposer %s not ready: %v", expB1Name, err) + } + t.Logf("Exposer %s is ready", expB1Name) + + return ctx + }). + Assess("verify deployed-apps contains all 3 components", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + + if err := framework.WaitForDeployedAppsCount(ctx, client, scalityUIName, 3, framework.LongTimeout); err != nil { + t.Fatalf("Deployed-apps count mismatch: %v", err) + } + t.Logf("Verified: deployed-apps contains 3 components before deletion") + + apps, err := framework.GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + t.Fatalf("Failed to get deployed-apps: %v", err) + } + t.Logf("Deployed apps before ns-a deletion: %+v", apps) + + return ctx + }). 
+ Assess("delete namespace ns-a", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: nsA}, + } + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Fatalf("Failed to delete namespace %s: %v", nsA, err) + } + t.Logf("Triggered deletion of namespace %s", nsA) + + return ctx + }). + Assess("wait for namespace ns-a deletion", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + + if err := framework.WaitForNamespaceDeleted(ctx, client, nsA, framework.LongTimeout); err != nil { + t.Fatalf("Namespace %s not deleted: %v", nsA, err) + } + t.Logf("Namespace %s fully deleted", nsA) + + return ctx + }). + Assess("verify deployed-apps updated correctly", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + compA1Name := ctx.Value(multiNamespaceCompA1Key).(string) + compA2Name := ctx.Value(multiNamespaceCompA2Key).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + + if err := framework.WaitForDeployedAppsCount(ctx, client, scalityUIName, 1, framework.LongTimeout); err != nil { + t.Fatalf("Deployed-apps count should be 1 after ns-a deletion: %v", err) + } + t.Logf("Verified: deployed-apps count is 1 after ns-a deletion") + + for _, compName := range []string{compA1Name, compA2Name} { + if err := framework.WaitForDeployedAppsNotContains(ctx, client, scalityUIName, compName, framework.DefaultTimeout); err != nil { + t.Fatalf("Component %s still in deployed-apps after ns-a deletion: %v", compName, err) + } + t.Logf("Verified: Component %s removed from deployed-apps", compName) + } + + if err := framework.WaitForDeployedAppsContains(ctx, client, scalityUIName, 
compB1Name, framework.DefaultTimeout); err != nil { + t.Fatalf("Component %s should still be in deployed-apps: %v", compB1Name, err) + } + t.Logf("Verified: Component %s still in deployed-apps", compB1Name) + + apps, err := framework.GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + t.Fatalf("Failed to get deployed-apps: %v", err) + } + t.Logf("Deployed apps after ns-a deletion: %+v", apps) + + return ctx + }). + Assess("verify ns-b resources unaffected", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsB := ctx.Value(multiNamespaceNSBKey).(string) + compB1Name := ctx.Value(multiNamespaceCompB1Key).(string) + expB1Name := ctx.Value(multiNamespaceExpB1Key).(string) + + if err := framework.WaitForDeploymentReady(ctx, client, nsB, compB1Name, framework.DefaultTimeout); err != nil { + t.Fatalf("Component %s deployment in ns-b should still be ready: %v", compB1Name, err) + } + t.Logf("Verified: Component %s deployment in ns-b is still running", compB1Name) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, nsB, expB1Name, framework.DefaultTimeout); err != nil { + t.Fatalf("Exposer %s in ns-b should still be ready: %v", expB1Name, err) + } + t.Logf("Verified: Exposer %s in ns-b is still ready", expB1Name) + + nsBConfigMap := compB1Name + framework.RuntimeConfigMapSuffix + if err := framework.WaitForConfigMapExists(ctx, client, nsB, nsBConfigMap, framework.DefaultTimeout); err != nil { + t.Fatalf("ConfigMap %s/%s should still exist: %v", nsB, nsBConfigMap, err) + } + t.Logf("Verified: ConfigMap %s/%s still exists after ns-a deletion", nsB, nsBConfigMap) + + return ctx + }). 
+ Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + nsA := ctx.Value(multiNamespaceNSAKey).(string) + nsB := ctx.Value(multiNamespaceNSBKey).(string) + scalityUIName := ctx.Value(multiNamespaceScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI: %v", err) + } else { + t.Logf("Deleted ScalityUI %s", scalityUIName) + } + + for _, nsName := range []string{nsA, nsB} { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}} + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace %s: %v", nsName, err) + } else { + t.Logf("Deleted namespace %s", nsName) + } + } + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} diff --git a/test/e2e/namespace_deletion_test.go b/test/e2e/namespace_deletion_test.go new file mode 100644 index 0000000..d5bc72a --- /dev/null +++ b/test/e2e/namespace_deletion_test.go @@ -0,0 +1,202 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "testing" + + "github.com/scality/ui-operator/test/e2e/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +type namespaceDeletionContextKey string + +const ( + namespaceDeletionNamespaceKey namespaceDeletionContextKey = "namespace-deletion-namespace" + namespaceDeletionScalityUIKey namespaceDeletionContextKey = "namespace-deletion-scalityui" + namespaceDeletionComponentKey namespaceDeletionContextKey = "namespace-deletion-component" + namespaceDeletionExposerKey namespaceDeletionContextKey = "namespace-deletion-exposer" +) + +func TestNamespaceDeletion_CascadeCleanup(t *testing.T) { + feature := features.New("namespace-deletion-cascade-cleanup"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + testNamespace := envconf.RandomName("ns-deletion", 16) + scalityUIName := envconf.RandomName("cascade-cleanup-ui", 24) + componentName := envconf.RandomName("cascade-cleanup-comp", 24) + exposerName := envconf.RandomName("cascade-cleanup-exp", 24) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace: %v", err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, namespaceDeletionNamespaceKey, testNamespace) + ctx = context.WithValue(ctx, namespaceDeletionScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, namespaceDeletionComponentKey, componentName) + ctx = context.WithValue(ctx, namespaceDeletionExposerKey, exposerName) + return ctx + }). 
+ Assess("create ScalityUI", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(namespaceDeletionScalityUIKey).(string) + + if err := framework.NewScalityUIBuilder(scalityUIName). + WithProductName("Namespace Deletion Test"). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUI: %v", err) + } + t.Logf("Created ScalityUI %s", scalityUIName) + + if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil { + t.Fatalf("ScalityUI not ready: %v", err) + } + t.Logf("ScalityUI is ready") + + return ctx + }). + Assess("create Component and Exposer in test namespace", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(namespaceDeletionNamespaceKey).(string) + scalityUIName := ctx.Value(namespaceDeletionScalityUIKey).(string) + componentName := ctx.Value(namespaceDeletionComponentKey).(string) + exposerName := ctx.Value(namespaceDeletionExposerKey).(string) + + if err := framework.NewScalityUIComponentBuilder(componentName, namespace). + WithImage(framework.MockServerImage). + Create(ctx, client); err != nil { + t.Fatalf("Failed to create ScalityUIComponent: %v", err) + } + t.Logf("Created ScalityUIComponent %s", componentName) + + if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component deployment not ready: %v", err) + } + + if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component not configured: %v", err) + } + t.Logf("Component ready and configured") + + if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace). + WithScalityUI(scalityUIName). + WithScalityUIComponent(componentName). + WithAppHistoryBasePath("/cascade-cleanup"). 
+ Create(ctx, client); err != nil { + t.Fatalf("Failed to create Exposer: %v", err) + } + t.Logf("Created ScalityUIComponentExposer %s", exposerName) + + if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil { + t.Fatalf("Exposer not ready: %v", err) + } + t.Logf("Exposer is ready") + + return ctx + }). + Assess("verify component in deployed-apps", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(namespaceDeletionScalityUIKey).(string) + componentName := ctx.Value(namespaceDeletionComponentKey).(string) + + if err := framework.WaitForDeployedAppsContains(ctx, client, scalityUIName, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component not in deployed-apps: %v", err) + } + t.Logf("Verified: Component %s is in deployed-apps ConfigMap", componentName) + + apps, err := framework.GetDeployedApps(ctx, client, scalityUIName) + if err != nil { + t.Fatalf("Failed to get deployed-apps: %v", err) + } + t.Logf("Deployed apps before deletion: %v", apps) + + return ctx + }). + Assess("delete namespace", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(namespaceDeletionNamespaceKey).(string) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: namespace}, + } + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Fatalf("Failed to delete namespace: %v", err) + } + t.Logf("Triggered deletion of namespace %s", namespace) + + return ctx + }). 
+ Assess("wait for namespace deletion", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(namespaceDeletionNamespaceKey).(string) + + if err := framework.WaitForNamespaceDeleted(ctx, client, namespace, framework.LongTimeout); err != nil { + t.Fatalf("Namespace not deleted: %v", err) + } + t.Logf("Namespace %s fully deleted", namespace) + + return ctx + }). + Assess("verify component removed from deployed-apps", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + scalityUIName := ctx.Value(namespaceDeletionScalityUIKey).(string) + componentName := ctx.Value(namespaceDeletionComponentKey).(string) + + if err := framework.WaitForDeployedAppsNotContains(ctx, client, scalityUIName, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Component still in deployed-apps after namespace deletion: %v", err) + } + t.Logf("Verified: Component %s removed from deployed-apps ConfigMap", componentName) + + if err := framework.WaitForDeployedAppsCount(ctx, client, scalityUIName, 0, framework.DefaultTimeout); err != nil { + t.Fatalf("Deployed-apps count not 0: %v", err) + } + t.Logf("Verified: deployed-apps is empty after namespace deletion") + + return ctx + }). 
+ Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + namespace := ctx.Value(namespaceDeletionNamespaceKey).(string) + scalityUIName := ctx.Value(namespaceDeletionScalityUIKey).(string) + + if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil { + t.Logf("Warning: Failed to delete ScalityUI: %v", err) + } else { + t.Logf("Deleted ScalityUI %s", scalityUIName) + } + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + if err := client.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace: %v", err) + } else { + t.Logf("Deleted namespace %s", namespace) + } + + return ctx + }). + Feature() + + testenv.Test(t, feature) +} diff --git a/test/e2e/pod_lifecycle_test.go b/test/e2e/pod_lifecycle_test.go new file mode 100644 index 0000000..b2054e8 --- /dev/null +++ b/test/e2e/pod_lifecycle_test.go @@ -0,0 +1,544 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "testing" + "time" + + uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1" + "github.com/scality/ui-operator/test/e2e/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/e2e-framework/pkg/envconf" + "sigs.k8s.io/e2e-framework/pkg/features" +) + +const ( + componentConfigHashAnnotation = "ui.scality.com/config-hash" +) + +type podLifecycleContextKey string + +const ( + podLifecycleNamespaceKey podLifecycleContextKey = "pod-lifecycle-namespace" + podLifecycleScalityUIKey podLifecycleContextKey = "pod-lifecycle-scalityui" + podLifecycleComponentKey podLifecycleContextKey = "pod-lifecycle-component" + podLifecycleExposerKey podLifecycleContextKey = "pod-lifecycle-exposer" +) + +func TestPodLifecycle_RollingUpdateOnConfigChange(t *testing.T) { + feature := features.New("rolling-update-on-config-change"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + client := cfg.Client() + testNamespace := envconf.RandomName("rolling-update", 16) + scalityUIName := envconf.RandomName("rolling-update-ui", 24) + componentName := envconf.RandomName("rolling-update-comp", 24) + exposerName := envconf.RandomName("rolling-update-exp", 24) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, + } + if err := client.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace: %v", err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, podLifecycleNamespaceKey, testNamespace) + ctx = context.WithValue(ctx, podLifecycleScalityUIKey, scalityUIName) + ctx = context.WithValue(ctx, podLifecycleComponentKey, componentName) + ctx = context.WithValue(ctx, podLifecycleExposerKey, exposerName) + return ctx + }). 
		Assess("create full resource chain", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			exposerName := ctx.Value(podLifecycleExposerKey).(string)

			// Build the chain ScalityUI -> ScalityUIComponent -> Exposer,
			// waiting for each link to be ready before creating the next.
			if err := framework.NewScalityUIBuilder(scalityUIName).
				WithProductName("Rolling Update Test").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create ScalityUI: %v", err)
			}
			t.Logf("Created ScalityUI %s", scalityUIName)

			if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil {
				t.Fatalf("ScalityUI not ready: %v", err)
			}

			if err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create ScalityUIComponent: %v", err)
			}
			t.Logf("Created ScalityUIComponent %s", componentName)

			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component not configured: %v", err)
			}

			if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace).
				WithScalityUI(scalityUIName).
				WithScalityUIComponent(componentName).
				WithAppHistoryBasePath("/initial-path").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create Exposer: %v", err)
			}
			t.Logf("Created ScalityUIComponentExposer %s", exposerName)

			if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil {
				t.Fatalf("Exposer not ready: %v", err)
			}

			// Creating the exposer itself may roll the component deployment,
			// so wait for readiness again before recording baseline state.
			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component deployment not ready after exposer: %v", err)
			}

			return ctx
		}).
		Assess("wait for stability and record initial state", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)

			// Baseline: exactly one active ReplicaSet and the current
			// config-hash annotation, against which the update is compared.
			activeRS, err := framework.WaitForDeploymentStable(ctx, client, namespace, componentName, framework.LongTimeout)
			if err != nil {
				t.Fatalf("Deployment not stable: %v", err)
			}
			t.Logf("Deployment stable with single active ReplicaSet: %s (replicas=%d)", activeRS.Name, activeRS.Status.Replicas)

			initialHash, err := framework.GetDeploymentPodTemplateHash(ctx, client, namespace, componentName, componentConfigHashAnnotation)
			if err != nil {
				t.Fatalf("Failed to get initial hash: %v", err)
			}
			t.Logf("Initial component config hash: %s", initialHash)

			ctx = context.WithValue(ctx, podLifecycleContextKey("initial-hash"), initialHash)
			ctx = context.WithValue(ctx, podLifecycleContextKey("initial-rs-name"), activeRS.Name)
			return ctx
		}).
		Assess("modify exposer appHistoryBasePath", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			exposerName := ctx.Value(podLifecycleExposerKey).(string)

			// RetryOnConflict re-reads and re-applies on optimistic-lock
			// conflicts with the controller's own status updates.
			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				exposer := &uiv1alpha1.ScalityUIComponentExposer{}
				if err := client.Resources(namespace).Get(ctx, exposerName, namespace, exposer); err != nil {
					return err
				}
				exposer.Spec.AppHistoryBasePath = "/updated-path"
				return client.Resources(namespace).Update(ctx, exposer)
			})
			if err != nil {
				t.Fatalf("Failed to update exposer: %v", err)
			}
			t.Logf("Updated exposer appHistoryBasePath to /updated-path")

			return ctx
		}).
		Assess("verify component deployment rolling update", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			initialHash := ctx.Value(podLifecycleContextKey("initial-hash")).(string)
			initialRSName := ctx.Value(podLifecycleContextKey("initial-rs-name")).(string)

			// 1. Config-hash annotation must change from its recorded value.
			newHash, err := framework.WaitForDeploymentAnnotationChange(ctx, client, namespace, componentName,
				componentConfigHashAnnotation, initialHash, framework.LongTimeout)
			if err != nil {
				t.Fatalf("Hash annotation did not change: %v", err)
			}
			t.Logf("Component config hash changed: %s -> %s", initialHash, newHash)

			// 2. A ReplicaSet other than the baseline one must appear.
			newRSName, err := framework.WaitForNewReplicaSet(ctx, client, namespace, componentName, []string{initialRSName}, framework.LongTimeout)
			if err != nil {
				t.Fatalf("New ReplicaSet not created: %v", err)
			}
			t.Logf("New ReplicaSet created: %s", newRSName)

			// 3. The rollout must complete and the old ReplicaSet drain to 0.
			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Deployment not ready after rolling update: %v", err)
			}

			if err := framework.WaitForReplicaSetScaledDown(ctx, client, namespace, initialRSName, framework.LongTimeout); err != nil {
				t.Fatalf("Old ReplicaSet %s did not scale down: %v", initialRSName, err)
			}
			t.Logf("Old ReplicaSet %s scaled down to 0", initialRSName)

			return ctx
		}).
		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)

			// Best-effort cleanup: failures are logged, not fatal, so one
			// leaked resource does not mask the test result.
			if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil {
				t.Logf("Warning: Failed to delete ScalityUI: %v", err)
			}

			ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
			if err := client.Resources().Delete(ctx, ns); err != nil {
				t.Logf("Warning: Failed to delete namespace: %v", err)
			} else {
				t.Logf("Deleted namespace %s", namespace)
			}

			return ctx
		}).
		Feature()

	testenv.Test(t, feature)
}

// TestPodLifecycle_OperatorCrashRecovery verifies that after the operator pod
// is deleted (simulated crash) and restarts, all previously reconciled
// resources remain ready. Labeled "disruptive" because it restarts the
// operator shared by other tests.
func TestPodLifecycle_OperatorCrashRecovery(t *testing.T) {
	feature := features.New("operator-crash-recovery").
		WithLabel("disruptive", "true").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			// Randomized names isolate concurrent or repeated test runs.
			testNamespace := envconf.RandomName("crash-recovery", 16)
			scalityUIName := envconf.RandomName("crash-recovery-ui", 24)
			componentName := envconf.RandomName("crash-recovery-comp", 24)
			exposerName := envconf.RandomName("crash-recovery-exp", 24)

			ns := &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: testNamespace},
			}
			if err := client.Resources().Create(ctx, ns); err != nil {
				t.Fatalf("Failed to create namespace: %v", err)
			}
			t.Logf("Created namespace %s", testNamespace)

			ctx = context.WithValue(ctx, podLifecycleNamespaceKey, testNamespace)
			ctx = context.WithValue(ctx, podLifecycleScalityUIKey, scalityUIName)
			ctx = context.WithValue(ctx, podLifecycleComponentKey, componentName)
			ctx = context.WithValue(ctx, podLifecycleExposerKey, exposerName)
			return ctx
		}).
		Assess("create full resource chain", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			exposerName := ctx.Value(podLifecycleExposerKey).(string)

			// Build the full chain ScalityUI -> Component -> Exposer so the
			// crash happens with everything reconciled and ready.
			if err := framework.NewScalityUIBuilder(scalityUIName).
				WithProductName("Crash Recovery Test").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create ScalityUI: %v", err)
			}

			if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil {
				t.Fatalf("ScalityUI not ready: %v", err)
			}
			t.Logf("ScalityUI ready")

			if err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create component: %v", err)
			}

			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component not configured: %v", err)
			}
			t.Logf("Component ready and configured")

			if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace).
				WithScalityUI(scalityUIName).
				WithScalityUIComponent(componentName).
				WithAppHistoryBasePath("/crash-test").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create exposer: %v", err)
			}

			if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil {
				t.Fatalf("Exposer not ready: %v", err)
			}
			t.Logf("Exposer ready - full chain established")

			return ctx
		}).
		Assess("delete operator pod (simulate crash)", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()

			// DeleteOperatorPod deletes the pod and waits for the replacement
			// to come back up within the given timeout.
			t.Logf("Deleting operator pod to simulate crash...")
			if err := framework.DeleteOperatorPod(ctx, client, framework.LongTimeout); err != nil {
				t.Fatalf("Failed to restart operator: %v", err)
			}
			t.Logf("Operator pod restarted successfully")

			return ctx
		}).
		Assess("verify all resources recovered", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			exposerName := ctx.Value(podLifecycleExposerKey).(string)

			// Re-check every piece of the chain plus its derived resources
			// (deployment, service, runtime ConfigMap) after the restart.
			if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.DefaultTimeout); err != nil {
				t.Fatalf("ScalityUI not ready after recovery: %v", err)
			}
			t.Logf("ScalityUI still ready after crash recovery")

			if err := framework.WaitForScalityUIComponentCondition(ctx, client, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, framework.DefaultTimeout); err != nil {
				t.Fatalf("Component ConfigurationRetrieved not True: %v", err)
			}
			t.Logf("Component ConfigurationRetrieved still True")

			if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.DefaultTimeout); err != nil {
				t.Fatalf("Exposer not ready after recovery: %v", err)
			}
			t.Logf("Exposer still ready after crash recovery")

			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.DefaultTimeout); err != nil {
				t.Fatalf("Component deployment not ready: %v", err)
			}
			t.Logf("Component deployment still running")

			if err := framework.WaitForServiceExists(ctx, client, namespace, componentName, framework.DefaultTimeout); err != nil {
				t.Fatalf("Component service not found: %v", err)
			}
			t.Logf("Component service still exists")

			runtimeConfigMapName := componentName + framework.RuntimeConfigMapSuffix
			if err := framework.WaitForConfigMapExists(ctx, client, namespace, runtimeConfigMapName, framework.DefaultTimeout); err != nil {
				t.Fatalf("Runtime ConfigMap not found: %v", err)
			}
			t.Logf("Runtime ConfigMap still exists")

			t.Logf("All resources recovered successfully after operator crash")
			return ctx
		}).
		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)

			// Best-effort cleanup: log and continue on failure.
			if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil {
				t.Logf("Warning: Failed to delete ScalityUI: %v", err)
			}

			ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
			if err := client.Resources().Delete(ctx, ns); err != nil {
				t.Logf("Warning: Failed to delete namespace: %v", err)
			} else {
				t.Logf("Deleted namespace %s", namespace)
			}

			return ctx
		}).
		Feature()

	testenv.Test(t, feature)
}

// TestPodLifecycle_NoSpuriousUpdatesAfterRestart verifies that after an
// operator restart a no-op reconcile does not rewrite managed resources: the
// ResourceVersions of the deployment, service and runtime ConfigMap must be
// unchanged. Labeled "disruptive" because it restarts the shared operator.
func TestPodLifecycle_NoSpuriousUpdatesAfterRestart(t *testing.T) {
	feature := features.New("no-spurious-updates-after-restart").
		WithLabel("disruptive", "true").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			// Randomized names isolate concurrent or repeated test runs.
			testNamespace := envconf.RandomName("no-spurious", 16)
			scalityUIName := envconf.RandomName("no-spurious-ui", 24)
			componentName := envconf.RandomName("no-spurious-comp", 24)
			exposerName := envconf.RandomName("no-spurious-exp", 24)

			ns := &corev1.Namespace{
				ObjectMeta: metav1.ObjectMeta{Name: testNamespace},
			}
			if err := client.Resources().Create(ctx, ns); err != nil {
				t.Fatalf("Failed to create namespace: %v", err)
			}
			t.Logf("Created namespace %s", testNamespace)

			ctx = context.WithValue(ctx, podLifecycleNamespaceKey, testNamespace)
			ctx = context.WithValue(ctx, podLifecycleScalityUIKey, scalityUIName)
			ctx = context.WithValue(ctx, podLifecycleComponentKey, componentName)
			ctx = context.WithValue(ctx, podLifecycleExposerKey, exposerName)
			return ctx
		}).
		Assess("create full resource chain and wait for stability", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			exposerName := ctx.Value(podLifecycleExposerKey).(string)

			// Build the full chain and wait until the deployment is stable,
			// so the ResourceVersions recorded next are a quiescent baseline.
			if err := framework.NewScalityUIBuilder(scalityUIName).
				WithProductName("No Spurious Updates Test").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create ScalityUI: %v", err)
			}

			if err := framework.WaitForScalityUIReady(ctx, client, scalityUIName, framework.LongTimeout); err != nil {
				t.Fatalf("ScalityUI not ready: %v", err)
			}

			if err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create component: %v", err)
			}

			if err := framework.WaitForDeploymentReady(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentConfigured(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Component not configured: %v", err)
			}

			if err := framework.NewScalityUIComponentExposerBuilder(exposerName, namespace).
				WithScalityUI(scalityUIName).
				WithScalityUIComponent(componentName).
				WithAppHistoryBasePath("/no-spurious").
				Create(ctx, client); err != nil {
				t.Fatalf("Failed to create exposer: %v", err)
			}

			if err := framework.WaitForScalityUIComponentExposerReady(ctx, client, namespace, exposerName, framework.LongTimeout); err != nil {
				t.Fatalf("Exposer not ready: %v", err)
			}

			if _, err := framework.WaitForDeploymentStable(ctx, client, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Deployment not stable after exposer: %v", err)
			}

			t.Logf("Full chain established and stable")
			return ctx
		}).
		Assess("record resource versions", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)

			// Snapshot the ResourceVersion of each operator-managed resource;
			// any write by the controller would bump these.
			refs := []framework.ResourceRef{
				{Kind: "Deployment", Namespace: namespace, Name: componentName},
				{Kind: "Service", Namespace: namespace, Name: componentName},
				{Kind: "ConfigMap", Namespace: namespace, Name: componentName + framework.RuntimeConfigMapSuffix},
			}

			versions, err := framework.GetResourceVersions(ctx, client, refs)
			if err != nil {
				t.Fatalf("Failed to get resource versions: %v", err)
			}

			for key, version := range versions {
				t.Logf("Initial ResourceVersion: %s = %s", key, version)
			}

			ctx = context.WithValue(ctx, podLifecycleContextKey("resource-versions"), versions)
			ctx = context.WithValue(ctx, podLifecycleContextKey("resource-refs"), refs)
			return ctx
		}).
		Assess("restart operator", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()

			t.Logf("Restarting operator...")
			if err := framework.DeleteOperatorPod(ctx, client, framework.LongTimeout); err != nil {
				t.Fatalf("Failed to restart operator: %v", err)
			}
			t.Logf("Operator restarted")

			return ctx
		}).
		Assess("trigger reconcile and verify no spurious updates", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			componentName := ctx.Value(podLifecycleComponentKey).(string)
			initialVersions := ctx.Value(podLifecycleContextKey("resource-versions")).(map[string]string)
			refs := ctx.Value(podLifecycleContextKey("resource-refs")).([]framework.ResourceRef)

			// A label-only update reconciles the component without changing
			// anything the operator should propagate to managed resources.
			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				component := &uiv1alpha1.ScalityUIComponent{}
				if err := client.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil {
					return err
				}
				if component.Labels == nil {
					component.Labels = make(map[string]string)
				}
				component.Labels["trigger-reconcile"] = "true"
				return client.Resources(namespace).Update(ctx, component)
			})
			if err != nil {
				t.Fatalf("Failed to trigger reconcile: %v", err)
			}
			t.Logf("Triggered reconcile via label update")

			// Fixed grace period: give the controller time to perform any
			// (unexpected) writes before re-reading the versions.
			t.Logf("Waiting 3s for any potential updates...")
			time.Sleep(3 * time.Second)

			newVersions, err := framework.GetResourceVersions(ctx, client, refs)
			if err != nil {
				t.Fatalf("Failed to get new resource versions: %v", err)
			}

			allUnchanged := true
			for key, initialVersion := range initialVersions {
				newVersion := newVersions[key]
				if initialVersion != newVersion {
					t.Errorf("ResourceVersion changed for %s: %s -> %s", key, initialVersion, newVersion)
					allUnchanged = false
				} else {
					t.Logf("ResourceVersion unchanged: %s = %s", key, newVersion)
				}
			}

			if allUnchanged {
				t.Logf("SUCCESS: No spurious updates after operator restart")
			} else {
				t.Fatalf("FAILED: Some resources were updated spuriously")
			}

			return ctx
		}).
		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client := cfg.Client()
			namespace := ctx.Value(podLifecycleNamespaceKey).(string)
			scalityUIName := ctx.Value(podLifecycleScalityUIKey).(string)

			// Best-effort cleanup: log and continue on failure.
			if err := framework.DeleteScalityUI(ctx, client, scalityUIName); err != nil {
				t.Logf("Warning: Failed to delete ScalityUI: %v", err)
			}

			ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
			if err := client.Resources().Delete(ctx, ns); err != nil {
				t.Logf("Warning: Failed to delete namespace: %v", err)
			} else {
				t.Logf("Deleted namespace %s", namespace)
			}

			return ctx
		}).
		Feature()

	testenv.Test(t, feature)
}
diff --git a/test/e2e/real_http_test.go b/test/e2e/real_http_test.go
new file mode 100644
index 0000000..ea22ebb
--- /dev/null
+++ b/test/e2e/real_http_test.go
@@ -0,0 +1,588 @@
+/*
+Copyright 2025.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"strconv"
	"testing"
	"time"

	uiv1alpha1 "github.com/scality/ui-operator/api/v1alpha1"
	"github.com/scality/ui-operator/test/e2e/framework"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/features"
)

// realHTTPContextKey is a dedicated context-key type for this test file so
// context values cannot collide with keys from other packages.
type realHTTPContextKey string

const realHTTPNamespaceKey realHTTPContextKey = "real-http-namespace"

// TestRealHTTP_ReconcileStormPrevention verifies that repeated reconciles of
// an unchanged component do not re-fetch its configuration: the mock server's
// request counter must stay at 1 after the initial fetch.
func TestRealHTTP_ReconcileStormPrevention(t *testing.T) {
	const componentName = "storm-test-component"

	feature := features.New("reconcile-storm-prevention").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// setupNamespace (shared helper in this package) creates a random
			// namespace and stores its name under realHTTPNamespaceKey.
			return setupNamespace(ctx, t, cfg)
		}).
		Assess("create component and wait for initial config fetch", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)

			err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
				Create(ctx, k8sClient)
			if err != nil {
				t.Fatalf("Failed to create ScalityUIComponent: %v", err)
			}
			t.Logf("Created ScalityUIComponent %s", componentName)

			if err := framework.WaitForDeploymentReady(ctx, k8sClient, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, framework.LongTimeout); err != nil {
				t.Fatalf("Configuration not retrieved: %v", err)
			}
			t.Logf("Initial configuration retrieved")

			return ctx
		}).
		Assess("verify counter is 1 after initial fetch", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			namespace := ctx.Value(realHTTPNamespaceKey).(string)
			// The mock server counts every config request it serves; exactly
			// one fetch should have happened so far.
			mockClient := framework.NewMockServerClient(namespace, componentName)

			counter, err := mockClient.GetCounter(ctx)
			if err != nil {
				t.Fatalf("Failed to get counter: %v", err)
			}
			if counter != 1 {
				t.Fatalf("Expected counter=1 after initial fetch, got %d", counter)
			}
			t.Logf("Counter is 1 after initial fetch")

			return ctx
		}).
		Assess("trigger 10 reconciles without changing image", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)

			// Each label change forces a reconcile but leaves the image (and
			// therefore LastFetchedImage) untouched, so no re-fetch is due.
			for i := 0; i < 10; i++ {
				err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
					component := &uiv1alpha1.ScalityUIComponent{}
					if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil {
						return err
					}
					if component.Labels == nil {
						component.Labels = make(map[string]string)
					}
					component.Labels["reconcile-trigger"] = strconv.Itoa(i)
					return k8sClient.Resources(namespace).Update(ctx, component)
				})
				if err != nil {
					t.Fatalf("Failed to update component on iteration %d: %v", i, err)
				}
				time.Sleep(100 * time.Millisecond)
			}
			t.Logf("Triggered 10 reconciles")

			// Grace period so any in-flight reconcile can finish (and would
			// have bumped the counter if it fetched).
			time.Sleep(2 * time.Second)

			return ctx
		}).
		Assess("verify counter still equals 1", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			namespace := ctx.Value(realHTTPNamespaceKey).(string)
			mockClient := framework.NewMockServerClient(namespace, componentName)

			counter, err := mockClient.GetCounter(ctx)
			if err != nil {
				t.Fatalf("Failed to get counter: %v", err)
			}
			if counter != 1 {
				t.Fatalf("Expected counter=1 after 10 reconciles (no image change), got %d", counter)
			}
			t.Logf("Counter remains 1 - reconcile storm prevention works")

			return ctx
		}).
		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// teardownNamespace (shared helper) deletes the test namespace.
			return teardownNamespace(ctx, t, cfg)
		}).
		Feature()

	testenv.Test(t, feature)
}

// TestRealHTTP_TimeoutHandling verifies that when the component's config
// endpoint responds slower than the operator's HTTP timeout, the
// ConfigurationRetrieved condition flips to False with Reason=FetchFailed.
func TestRealHTTP_TimeoutHandling(t *testing.T) {
	const componentName = "timeout-test-component"

	feature := features.New("http-timeout-handling").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			return setupNamespace(ctx, t, cfg)
		}).
		Assess("create component and wait for initial success", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)

			err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
+ Create(ctx, k8sClient) + if err != nil { + t.Fatalf("Failed to create ScalityUIComponent: %v", err) + } + t.Logf("Created ScalityUIComponent %s", componentName) + + if err := framework.WaitForDeploymentReady(ctx, k8sClient, namespace, componentName, framework.LongTimeout); err != nil { + t.Fatalf("Deployment not ready: %v", err) + } + + if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName, + framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, framework.LongTimeout); err != nil { + t.Fatalf("Initial configuration not retrieved: %v", err) + } + t.Logf("Initial configuration retrieved successfully") + + return ctx + }). + Assess("configure 15s delay and trigger re-fetch", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + mockClient := framework.NewMockServerClient(namespace, componentName) + + if err := mockClient.SetDelay(ctx, 15000); err != nil { + t.Fatalf("Failed to set delay: %v", err) + } + t.Logf("Configured mock server with 15s delay (operator timeout is 10s)") + + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + component := &uiv1alpha1.ScalityUIComponent{} + if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil { + return err + } + if component.Annotations == nil { + component.Annotations = make(map[string]string) + } + component.Annotations[uiv1alpha1.ForceRefreshAnnotation] = "true" + return k8sClient.Resources(namespace).Update(ctx, component) + }) + if err != nil { + t.Fatalf("Failed to set force-refresh annotation: %v", err) + } + t.Logf("Set force-refresh annotation to trigger re-fetch") + + return ctx + }). 
+ Assess("verify timeout failure", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + + timeout := 30 * time.Second + if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName, + framework.ConditionConfigurationRetrieved, metav1.ConditionFalse, timeout); err != nil { + t.Fatalf("Expected ConfigurationRetrieved=False due to timeout: %v", err) + } + + component, err := framework.GetScalityUIComponent(ctx, k8sClient, namespace, componentName) + if err != nil { + t.Fatalf("Failed to get component: %v", err) + } + + for _, cond := range component.Status.Conditions { + if cond.Type == framework.ConditionConfigurationRetrieved { + if cond.Reason != "FetchFailed" { + t.Fatalf("Expected Reason=FetchFailed, got %s", cond.Reason) + } + t.Logf("Verified: ConfigurationRetrieved=False, Reason=FetchFailed") + break + } + } + + return ctx + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + return teardownNamespace(ctx, t, cfg) + }). + Feature() + + testenv.Test(t, feature) +} + +// TestRealHTTP_RecoveryAfterServerFailure verifies recovery after server failure. +// Strategy: Reset mock server config, then wait for controller's natural retry (RequeueAfter: 30s). +func TestRealHTTP_RecoveryAfterServerFailure(t *testing.T) { + const componentName = "recovery-test-component" + + feature := features.New("recovery-after-server-failure"). + Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + return setupNamespace(ctx, t, cfg) + }). + Assess("create component and wait for initial success", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + + err := framework.NewScalityUIComponentBuilder(componentName, namespace). 
				WithImage(framework.MockServerImage).
				Create(ctx, k8sClient)
			if err != nil {
				t.Fatalf("Failed to create ScalityUIComponent: %v", err)
			}
			t.Logf("Created ScalityUIComponent %s", componentName)

			if err := framework.WaitForDeploymentReady(ctx, k8sClient, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, framework.LongTimeout); err != nil {
				t.Fatalf("Initial configuration not retrieved: %v", err)
			}
			t.Logf("Initial configuration retrieved successfully")

			return ctx
		}).
		Assess("configure 500 error and trigger re-fetch", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)
			mockClient := framework.NewMockServerClient(namespace, componentName)

			// Simulate a server-side outage for the config endpoint.
			if err := mockClient.SetStatusCode(ctx, 500); err != nil {
				t.Fatalf("Failed to set status code: %v", err)
			}
			t.Logf("Configured mock server to return 500")

			// Force-refresh makes the controller re-fetch despite the image
			// being unchanged, so it observes the failure.
			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				component := &uiv1alpha1.ScalityUIComponent{}
				if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil {
					return err
				}
				if component.Annotations == nil {
					component.Annotations = make(map[string]string)
				}
				component.Annotations[uiv1alpha1.ForceRefreshAnnotation] = "true"
				return k8sClient.Resources(namespace).Update(ctx, component)
			})
			if err != nil {
				t.Fatalf("Failed to set force-refresh annotation: %v", err)
			}
			t.Logf("Set force-refresh annotation to trigger re-fetch")

			return ctx
		}).
		Assess("verify failure status", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)

			if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionFalse, framework.LongTimeout); err != nil {
				t.Fatalf("Expected ConfigurationRetrieved=False: %v", err)
			}
			t.Logf("Verified: ConfigurationRetrieved=False (server returning 500)")

			return ctx
		}).
		Assess("recover mock server and trigger explicit re-fetch", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)
			mockClient := framework.NewMockServerClient(namespace, componentName)

			// First, remove force-refresh so we can control when the next fetch happens
			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				component := &uiv1alpha1.ScalityUIComponent{}
				if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil {
					return err
				}
				delete(component.Annotations, uiv1alpha1.ForceRefreshAnnotation)
				return k8sClient.Resources(namespace).Update(ctx, component)
			})
			if err != nil {
				t.Fatalf("Failed to remove force-refresh: %v", err)
			}
			t.Logf("Removed force-refresh annotation")

			// Confirm the removal has landed before re-adding it later,
			// otherwise the re-add might be a no-op from the controller's view.
			if err := framework.WaitForScalityUIComponentAnnotationAbsent(ctx, k8sClient, namespace, componentName,
				uiv1alpha1.ForceRefreshAnnotation, framework.DefaultTimeout); err != nil {
				t.Fatalf("Failed to verify force-refresh annotation removed: %v", err)
			}

			// Reset mock server to return 200
			if err := mockClient.Reset(ctx); err != nil {
				t.Fatalf("Failed to reset mock server: %v", err)
			}
			t.Logf("Reset mock server to return 200")

			// Verify mock server is returning 200
			statusCode, _, err := mockClient.TestFetch(ctx)
			if err != nil {
				t.Fatalf("Failed to test fetch: %v", err)
			}
			if statusCode != 200 {
				t.Fatalf("Mock server should return 200, got %d", statusCode)
			}
			t.Logf("Verified: Mock server returns 200")

			// Now add force-refresh to trigger a new fetch
			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
				component := &uiv1alpha1.ScalityUIComponent{}
				if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil {
					return err
				}
				if component.Annotations == nil {
					component.Annotations = make(map[string]string)
				}
				component.Annotations[uiv1alpha1.ForceRefreshAnnotation] = "true"
				return k8sClient.Resources(namespace).Update(ctx, component)
			})
			if err != nil {
				t.Fatalf("Failed to add force-refresh: %v", err)
			}
			t.Logf("Added force-refresh to trigger new fetch")

			// Wait for condition to become True
			if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, 30*time.Second); err != nil {
				// Debug info: dump component and mock-server state before
				// failing, since the root cause is hard to see from the
				// timeout error alone.
				component := &uiv1alpha1.ScalityUIComponent{}
				_ = k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component)
				t.Logf("Debug: Annotations=%v", component.Annotations)
				t.Logf("Debug: LastFetchedImage=%s", component.Status.LastFetchedImage)
				for _, cond := range component.Status.Conditions {
					t.Logf("Debug: Condition %s=%s (Reason=%s, Message=%s)",
						cond.Type, cond.Status, cond.Reason, cond.Message)
				}

				counter, _ := mockClient.GetCounter(ctx)
				t.Logf("Debug: MockServer counter=%d", counter)

				statusCode, _, _ := mockClient.TestFetch(ctx)
				t.Logf("Debug: MockServer returns status=%d", statusCode)

				t.Fatalf("Expected ConfigurationRetrieved=True: %v", err)
			}
			t.Logf("Verified: ConfigurationRetrieved=True (recovered)")

			return ctx
		}).
		Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			return teardownNamespace(ctx, t, cfg)
		}).
		Feature()

	testenv.Test(t, feature)
}

// TestRealHTTP_NoFetchWithoutTrigger verifies that after initial success,
// the controller doesn't make new HTTP requests without a trigger (image change or force-refresh).
func TestRealHTTP_NoFetchWithoutTrigger(t *testing.T) {
	const componentName = "no-fetch-test-component"

	feature := features.New("no-fetch-without-trigger").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			return setupNamespace(ctx, t, cfg)
		}).
		Assess("create component and wait for initial success", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			k8sClient := cfg.Client()
			namespace := ctx.Value(realHTTPNamespaceKey).(string)

			err := framework.NewScalityUIComponentBuilder(componentName, namespace).
				WithImage(framework.MockServerImage).
				Create(ctx, k8sClient)
			if err != nil {
				t.Fatalf("Failed to create ScalityUIComponent: %v", err)
			}
			t.Logf("Created ScalityUIComponent %s", componentName)

			if err := framework.WaitForDeploymentReady(ctx, k8sClient, namespace, componentName, framework.LongTimeout); err != nil {
				t.Fatalf("Deployment not ready: %v", err)
			}

			if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName,
				framework.ConditionConfigurationRetrieved, metav1.ConditionTrue, framework.LongTimeout); err != nil {
				t.Fatalf("Initial configuration not retrieved: %v", err)
			}
			t.Logf("Initial configuration retrieved successfully")

			return ctx
		}).
+ Assess("reset counter and configure 500", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + namespace := ctx.Value(realHTTPNamespaceKey).(string) + mockClient := framework.NewMockServerClient(namespace, componentName) + + if err := mockClient.Reset(ctx); err != nil { + t.Fatalf("Failed to reset counter: %v", err) + } + + if err := mockClient.SetStatusCode(ctx, 500); err != nil { + t.Fatalf("Failed to set status code: %v", err) + } + t.Logf("Reset counter to 0 and configured mock server to return 500") + + return ctx + }). + Assess("verify no fetch without trigger", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + mockClient := framework.NewMockServerClient(namespace, componentName) + + // Trigger multiple reconciles by updating labels (not image or force-refresh) + for i := 0; i < 5; i++ { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + component := &uiv1alpha1.ScalityUIComponent{} + if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil { + return err + } + if component.Labels == nil { + component.Labels = make(map[string]string) + } + component.Labels["trigger-reconcile"] = fmt.Sprintf("value-%d", i) + return k8sClient.Resources(namespace).Update(ctx, component) + }) + if err != nil { + t.Fatalf("Failed to update labels: %v", err) + } + time.Sleep(200 * time.Millisecond) + } + + t.Logf("Triggered 5 reconciles via label updates, waiting 3 seconds...") + time.Sleep(3 * time.Second) + + counter, err := mockClient.GetCounter(ctx) + if err != nil { + t.Fatalf("Failed to get counter: %v", err) + } + + if counter != 0 { + t.Fatalf("Expected counter=0 (no fetch without image change or force-refresh), got %d", counter) + } + t.Logf("Verified: No HTTP requests made (counter=0)") + t.Logf("LastFetchedImage mechanism correctly prevents redundant fetches") + + return 
ctx + }). + Assess("verify condition still True despite mock returning 500", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + + component := &uiv1alpha1.ScalityUIComponent{} + if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil { + t.Fatalf("Failed to get component: %v", err) + } + + var condition *metav1.Condition + for i := range component.Status.Conditions { + if component.Status.Conditions[i].Type == framework.ConditionConfigurationRetrieved { + condition = &component.Status.Conditions[i] + break + } + } + + if condition == nil || condition.Status != metav1.ConditionTrue { + t.Fatalf("Expected condition to still be True (no refetch happened)") + } + t.Logf("Verified: ConfigurationRetrieved=True (no refetch triggered)") + + return ctx + }). + Assess("force-refresh triggers fetch to failing server", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + mockClient := framework.NewMockServerClient(namespace, componentName) + + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + component := &uiv1alpha1.ScalityUIComponent{} + if err := k8sClient.Resources(namespace).Get(ctx, componentName, namespace, component); err != nil { + return err + } + if component.Annotations == nil { + component.Annotations = make(map[string]string) + } + component.Annotations[uiv1alpha1.ForceRefreshAnnotation] = "true" + return k8sClient.Resources(namespace).Update(ctx, component) + }) + if err != nil { + t.Fatalf("Failed to set force-refresh annotation: %v", err) + } + t.Logf("Set force-refresh annotation") + + // Wait for the request to be made (counter should go from 0 to 1) + if err := framework.WaitForMockServerCounter(ctx, mockClient, 1, framework.DefaultTimeout); err != nil { + t.Fatalf("Fetch 
request not received after force-refresh: %v", err) + } + t.Logf("Force-refresh triggered HTTP request (counter=1)") + + // Verify condition becomes False (since server returns 500) + if err := framework.WaitForScalityUIComponentCondition(ctx, k8sClient, namespace, componentName, + framework.ConditionConfigurationRetrieved, metav1.ConditionFalse, framework.DefaultTimeout); err != nil { + t.Fatalf("Expected ConfigurationRetrieved=False after 500 error: %v", err) + } + t.Logf("Verified: ConfigurationRetrieved=False after force-refresh to failing server") + + return ctx + }). + Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + return teardownNamespace(ctx, t, cfg) + }). + Feature() + + testenv.Test(t, feature) +} + +func setupNamespace(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + + testNamespace := envconf.RandomName("real-http", 16) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + if err := k8sClient.Resources().Create(ctx, ns); err != nil { + t.Fatalf("Failed to create namespace %s: %v", testNamespace, err) + } + t.Logf("Created namespace %s", testNamespace) + + ctx = context.WithValue(ctx, realHTTPNamespaceKey, testNamespace) + return ctx +} + +func teardownNamespace(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + k8sClient := cfg.Client() + namespace := ctx.Value(realHTTPNamespaceKey).(string) + + mockClient := framework.NewMockServerClient(namespace, "") + if err := mockClient.CleanupCurlPods(ctx); err != nil { + t.Logf("Warning: Failed to cleanup curl pods: %v", err) + } + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + if err := k8sClient.Resources().Delete(ctx, ns); err != nil { + t.Logf("Warning: Failed to delete namespace %s: %v", namespace, err) + } else { + t.Logf("Deleted namespace %s", namespace) + } + + return ctx +} diff --git 
a/test/e2e/testdata/smoke/scalityui.yaml b/test/e2e/testdata/smoke/scalityui.yaml new file mode 100644 index 0000000..21763b0 --- /dev/null +++ b/test/e2e/testdata/smoke/scalityui.yaml @@ -0,0 +1,7 @@ +apiVersion: ui.scality.com/v1alpha1 +kind: ScalityUI +metadata: + name: e2e-smoke-ui +spec: + image: nginx:latest + productName: "E2E Smoke Test" diff --git a/test/e2e/testdata/smoke/scalityuicomponent.yaml b/test/e2e/testdata/smoke/scalityuicomponent.yaml new file mode 100644 index 0000000..7bf1fe9 --- /dev/null +++ b/test/e2e/testdata/smoke/scalityuicomponent.yaml @@ -0,0 +1,8 @@ +apiVersion: ui.scality.com/v1alpha1 +kind: ScalityUIComponent +metadata: + name: e2e-smoke-component + namespace: "{{.namespace}}" +spec: + image: mock-server:e2e + mountPath: /app/config diff --git a/test/e2e/testdata/smoke/scalityuicomponentexposer.yaml b/test/e2e/testdata/smoke/scalityuicomponentexposer.yaml new file mode 100644 index 0000000..b8a4d03 --- /dev/null +++ b/test/e2e/testdata/smoke/scalityuicomponentexposer.yaml @@ -0,0 +1,9 @@ +apiVersion: ui.scality.com/v1alpha1 +kind: ScalityUIComponentExposer +metadata: + name: e2e-smoke-exposer + namespace: "{{.namespace}}" +spec: + scalityUI: e2e-smoke-ui + scalityUIComponent: e2e-smoke-component + appHistoryBasePath: /mock