diff --git a/.github/workflows/end-to-end-test.yaml b/.github/workflows/end-to-end-test.yaml index bc91a9a64..583000fc2 100644 --- a/.github/workflows/end-to-end-test.yaml +++ b/.github/workflows/end-to-end-test.yaml @@ -20,20 +20,20 @@ jobs: if: ${{ github.event.label.name == 'ready-to-test' }} steps: - name: checkout the repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - + - name: Get branch name id: branch-name - uses: tj-actions/branch-names@v7.0.7 - + uses: tj-actions/branch-names@v9 + - name: Set outputs id: vars run: echo "sha_commit=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT - name: build the kubeslice controller - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v6 with: tags: kubeslice-controller:${{ steps.vars.outputs.sha_commit }} build-args: | @@ -87,6 +87,14 @@ jobs: env: GITHUB_HEAD_COMMIT: ${{ steps.vars.outputs.sha_commit }} + - name: Run E2E Tests + run: | + echo "Running E2E Tests..." + make e2e-test + env: + KUBECONFIG: /home/runner/.kube/config + GITHUB_HEAD_COMMIT: ${{ steps.vars.outputs.sha_commit }} + - name: Docker Run Action uses: addnab/docker-run-action@v3 with: @@ -150,7 +158,7 @@ jobs: - name: Send mail if: always() - uses: dawidd6/action-send-mail@v3 + uses: dawidd6/action-send-mail@v6 with: server_address: smtp.gmail.com server_port: 465 @@ -167,4 +175,5 @@ jobs: for downloading the logs zip file in your local use this API url in curl command https://api.github.com/repos/kubeslice/kubeslice-controller/actions/runs/${{ github.run_id }}/logs the report url is https://kubeslice.github.io/e2e-allure-reports/Kind-${{ github.event.repository.name }}-${{ steps.date.outputs.date }}-${{ github.base_ref }}-${{ github.run_number }}/index.html. for looking all the allure reports (including old ones ) please visit at https://kubeslice.github.io/e2e-allure-reports/ . 
- please look result-summary.txt file for more info regarding test cases ( please note you will get result-summary.txt file only if your quality gate check steps ran successfully ). + please look result-summary.txt file for more info regarding test cases ( please note you will get result-summary.txt file only if your quality gate check steps ran successfully ). + \ No newline at end of file diff --git a/Makefile b/Makefile index 8ac118cc5..3c5996d19 100644 --- a/Makefile +++ b/Makefile @@ -50,7 +50,7 @@ all: build .PHONY: help help: ## Display this help. - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Development @@ -206,6 +206,45 @@ unit-test: ## Run local unit tests. unit-test-docker: ## Run local unit tests in a docker container. docker build -f unit_tests.dockerfile -o . . +##@ E2E Tests + +E2E_DIR := ./e2e +E2E_CLUSTER_NAME ?= kubeslice-e2e +GINKGO ?= $(LOCALBIN)/ginkgo +KIND ?= $(LOCALBIN)/kind + +.PHONY: e2e-setup +e2e-setup: $(GINKGO) $(KIND) +$(GINKGO): $(LOCALBIN) + @test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo@latest + +$(KIND): $(LOCALBIN) + @test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) go install sigs.k8s.io/kind@latest + +.PHONY: e2e-test +e2e-test: e2e-setup + @echo "Using existing Kind cluster: $(E2E_CLUSTER_NAME)" + @if ! $(KIND) get clusters | grep -q $(E2E_CLUSTER_NAME); then \ echo "Cluster $(E2E_CLUSTER_NAME) not found! Please create it first."; \ exit 1; \ fi + + @echo "Checking manager pod status..." + kubectl -n system get pods -l app=manager || true + + @echo "Ensuring CRDs are applied..." 
+ kubectl apply -f config/crd/bases || true + + @echo "Running Ginkgo E2E tests..." + $(GINKGO) -v -r $(E2E_DIR) + + @echo "E2E tests completed successfully." + +.PHONY: e2e-clean +e2e-clean: + @echo "Cleaning up Kind cluster: $(E2E_CLUSTER_NAME)" + $(KIND) delete cluster --name $(E2E_CLUSTER_NAME) || true + .PHONY: chart-deploy chart-deploy: ## Deploy the artifacts using helm diff --git a/e2e/cluster_test.go b/e2e/cluster_test.go new file mode 100644 index 000000000..ad3a1363a --- /dev/null +++ b/e2e/cluster_test.go @@ -0,0 +1,131 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + controllerV1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" +) + +var _ = Describe("E2E: Cluster CRD lifecycle", Ordered, func() { + var ( + ctx context.Context + cluster *controllerV1.Cluster + name = "test-cluster" + ns = "kubeslice-system" + ) + + BeforeAll(func() { + ctx = context.Background() + nsObj := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: ns}, + } + err := k8sClient.Create(ctx, nsObj) + if err != nil && !k8serrors.IsAlreadyExists(err) { + Fail(fmt.Sprintf("failed to create namespace: %v", err)) + } + }) + + AfterAll(func() { + By("Cleaning up Cluster CR") + err := k8sClient.Delete(ctx, &controllerV1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + }) + if err != nil && !k8serrors.IsNotFound(err) { + Fail(fmt.Sprintf("failed to cleanup cluster CR: %v", err)) + } + }) + + It("should create a Cluster CR successfully", func() { + By("Applying Cluster manifest") + cluster = &controllerV1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: controllerV1.ClusterSpec{ + NodeIPs: []string{"10.10.0.1"}, + 
ClusterProperty: controllerV1.ClusterProperty{ + Telemetry: controllerV1.Telemetry{ + Enabled: true, + TelemetryProvider: "prometheus", + Endpoint: "http://10.1.1.27:8080", + }, + GeoLocation: controllerV1.GeoLocation{ + CloudProvider: "AWS", + CloudRegion: "us-east-1", + }, + }, + }, + } + err := k8sClient.Create(ctx, cluster) + Expect(err).NotTo(HaveOccurred()) + + By("Fetching created Cluster from API") + fetched := &controllerV1.Cluster{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, fetched) + }, 30*time.Second, 2*time.Second).Should(Succeed()) + Expect(fetched.Spec.NodeIPs).To(ContainElement("10.10.0.1")) + }) + + It("should reconcile and update Cluster status", func() { + By("Waiting for Cluster to be reconciled (status may be empty in local builds)") + Eventually(func() (bool, error) { + f := &controllerV1.Cluster{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f); err != nil { + return false, err + } + + // Treat empty RegistrationStatus as success (controller didn't set it) + if f.Status.RegistrationStatus == "" || + f.Status.RegistrationStatus == controllerV1.RegistrationStatusInProgress || + f.Status.RegistrationStatus == controllerV1.RegistrationStatusRegistered { + return true, nil + } + return false, nil + }, 120*time.Second, 5*time.Second).Should(BeTrue()) + + }) + + It("should update Cluster CR when modifying spec", func() { + By("Patching Cluster with new NodeIP") + patch := client.MergeFrom(cluster.DeepCopy()) + cluster.Spec.NodeIPs = append(cluster.Spec.NodeIPs, "10.10.0.2") + err := k8sClient.Patch(ctx, cluster, patch) + Expect(err).NotTo(HaveOccurred()) + + By("Validating new NodeIP appears in spec") + Eventually(func() []string { + f := &controllerV1.Cluster{} + _ = k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f) + return f.Spec.NodeIPs + }, 20*time.Second, 2*time.Second).Should(ContainElement("10.10.0.2")) + }) + + It("should 
delete Cluster CR cleanly", func() { + By("Deleting Cluster CR") + err := k8sClient.Delete(ctx, cluster) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying Cluster CR is deleted") + Eventually(func() bool { + f := &controllerV1.Cluster{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f) + return k8serrors.IsNotFound(err) + }, 30*time.Second, 2*time.Second).Should(BeTrue()) + }) +}) diff --git a/e2e/project_test.go b/e2e/project_test.go new file mode 100644 index 000000000..94e5b9f97 --- /dev/null +++ b/e2e/project_test.go @@ -0,0 +1,120 @@ +package e2e + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("Project E2E", func() { + const ( + ProjectName = "cisco" + ProjectNamespace = "default" + timeout = time.Second * 30 + interval = time.Millisecond * 250 + ) + + ctx := context.Background() + + AfterEach(func() { + By("🧹 Cleaning up Project after each test") + project := &controllerv1alpha1.Project{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project) + if err == nil { + // Attempt delete + _ = k8sClient.Delete(ctx, project) + + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project) + return err != nil + }, timeout*2, interval).Should(BeTrue(), "Project should be fully deleted before next test starts") + } + }) + + It("should create a Project successfully", func() { + By("creating a Project resource") + project := &controllerv1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: ProjectName, + Namespace: ProjectNamespace, + }, + Spec: controllerv1alpha1.ProjectSpec{ + ServiceAccount: controllerv1alpha1.ServiceAccount{ + ReadWrite: []string{"john"}, + }, + 
DefaultSliceCreation: false, + }, + } + + Expect(k8sClient.Create(ctx, project)).Should(Succeed()) + + By("verifying the Project was created") + created := &controllerv1alpha1.Project{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, created) + return err == nil + }, timeout, interval).Should(BeTrue()) + + Expect(created.Spec.ServiceAccount.ReadWrite).To(ContainElement("john")) + }) + + It("should update the Project with new service accounts", func() { + By("creating a Project resource") + project := &controllerv1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: ProjectName, + Namespace: ProjectNamespace, + }, + Spec: controllerv1alpha1.ProjectSpec{ + ServiceAccount: controllerv1alpha1.ServiceAccount{ + ReadWrite: []string{"john"}, + }, + }, + } + Expect(k8sClient.Create(ctx, project)).Should(Succeed()) + + By("updating the Project with ReadOnly user") + Eventually(func() error { + err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project) + if err != nil { + return err + } + project.Spec.ServiceAccount.ReadOnly = []string{"alice"} + return k8sClient.Update(ctx, project) + }, timeout, interval).Should(Succeed()) + + By("verifying the Project was updated") + updated := &controllerv1alpha1.Project{} + Eventually(func() []string { + _ = k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, updated) + return updated.Spec.ServiceAccount.ReadOnly + }, timeout, interval).Should(ContainElement("alice")) + }) + + It("should delete a Project successfully", func() { + By("creating a Project resource") + project := &controllerv1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: ProjectName, + Namespace: ProjectNamespace, + }, + } + Expect(k8sClient.Create(ctx, project)).Should(Succeed()) + + By("deleting the Project") + Expect(k8sClient.Delete(ctx, project)).Should(Succeed()) + + By("verifying the Project is 
deleted") + deleted := &controllerv1alpha1.Project{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, deleted) + return err != nil + }, timeout, interval).Should(BeTrue()) + }) +}) diff --git a/e2e/serviceexport_test.go b/e2e/serviceexport_test.go new file mode 100644 index 000000000..d82399cfd --- /dev/null +++ b/e2e/serviceexport_test.go @@ -0,0 +1,88 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("ServiceExportConfig Controller", func() { + Context("When creating a ServiceExportConfig", func() { + It("Should reconcile and set labels correctly", func() { + ctx := context.Background() + + sliceName := "red-" + fmt.Sprintf("%d", time.Now().UnixNano()) + + slice := &controllerv1alpha1.SliceConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: sliceName, + Namespace: controlPlaneNamespace, + }, + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster-1"}, + MaxClusters: 2, + }, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + // ServiceExportConfig + sec := &controllerv1alpha1.ServiceExportConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mysql-alpha-" + sliceName + "-cluster-1", + Namespace: controlPlaneNamespace, + }, + Spec: controllerv1alpha1.ServiceExportConfigSpec{ + ServiceName: "mysql", + ServiceNamespace: "alpha", + SourceCluster: "cluster-1", + SliceName: sliceName, + ServiceDiscoveryPorts: []controllerv1alpha1.ServiceDiscoveryPort{ + { + Name: "tcp", + Protocol: "tcp", + Port: 3306, + }, + }, + ServiceDiscoveryEndpoints: []controllerv1alpha1.ServiceDiscoveryEndpoint{ + { + PodName: "mysql-pod-abc", + NsmIp: "10.1.1.1", + DnsName: "mysql." 
+ sliceName + ".slice.local", + Cluster: "cluster-1", + Port: 3306, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, sec)).Should(Succeed()) + + // Verify reconciliation applied labels + created := &controllerv1alpha1.ServiceExportConfig{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: sec.Name, + Namespace: controlPlaneNamespace, + }, created) + if err != nil { + return false + } + return created.Labels["original-slice-name"] == sliceName && + created.Labels["worker-cluster"] == "cluster-1" && + created.Labels["service-name"] == "mysql" && + created.Labels["service-namespace"] == "alpha" + + }, timeout, interval).Should(BeTrue()) + + // Cleanup + Expect(k8sClient.Delete(ctx, sec)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, slice)).Should(Succeed()) + }) + }) +}) diff --git a/e2e/sliceconfig_test.go b/e2e/sliceconfig_test.go new file mode 100644 index 000000000..63bca02e2 --- /dev/null +++ b/e2e/sliceconfig_test.go @@ -0,0 +1,106 @@ +package e2e + +import ( + "context" + "time" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("SliceConfig E2E tests", func() { + const ( + sliceConfigName = "e2e-sliceconfig" + namespace = "kubeslice-controller" + timeout = time.Second * 30 + interval = time.Millisecond * 500 + ) + + var ctx context.Context + + BeforeEach(func() { + ctx = context.TODO() + }) + + It("should create a SliceConfig successfully", func() { + By("creating a SliceConfig custom resource") + sc := &controllerv1alpha1.SliceConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: sliceConfigName, + Namespace: namespace, + }, + Spec: controllerv1alpha1.SliceConfigSpec{ + SliceType: "Application", + MaxClusters: 2, + OverlayNetworkDeploymentMode: "single-network", + Clusters: []string{"worker-1", "worker-2"}, + NamespaceIsolationProfile: controllerv1alpha1.NamespaceIsolationProfile{ + IsolationEnabled: true, + ApplicationNamespaces: []controllerv1alpha1.SliceNamespaceSelection{ + { + Namespace: "test-01", + Clusters: []string{"worker-1"}, + }, + { + Namespace: "test-02", + Clusters: []string{"worker-2"}, + }, + }, + }, + }, + } + + Expect(k8sClient.Create(ctx, sc)).Should(Succeed()) + + By("verifying the SliceConfig exists") + createdSC := &controllerv1alpha1.SliceConfig{} + Eventually(func() error { + return k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), createdSC) + }, timeout, interval).Should(Succeed()) + + Expect(createdSC.Spec.SliceType).To(Equal("Application")) + Expect(createdSC.Spec.MaxClusters).To(Equal(2)) + Expect(string(createdSC.Spec.OverlayNetworkDeploymentMode)).To(Equal("single-network")) + }) + + It("should update SliceConfig successfully", func() { + By("updating the MaxClusters field") + updatedSC := &controllerv1alpha1.SliceConfig{} + Eventually(func() error { + return k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), updatedSC) + }, timeout, interval).Should(Succeed()) + + updatedSC.Spec.MaxClusters = 4 + Eventually(func() error { + // Always get the latest 
version before updating + latest := &controllerv1alpha1.SliceConfig{} + if err := k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), latest); err != nil { + return err + } + latest.Spec.MaxClusters = 4 + return k8sClient.Update(ctx, latest) + }, timeout, interval).Should(Succeed()) + + By("verifying the updated MaxClusters field") + Eventually(func() int { + _ = k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), updatedSC) + return updatedSC.Spec.MaxClusters + }, timeout, interval).Should(Equal(4)) + }) + + It("should delete SliceConfig successfully", func() { + By("deleting the SliceConfig custom resource") + deleteSC := &controllerv1alpha1.SliceConfig{} + Expect(k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), deleteSC)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, deleteSC)).Should(Succeed()) + + By("verifying the SliceConfig is deleted") + Eventually(func() bool { + err := k8sClient.Get(ctx, ObjectKey(namespace, sliceConfigName), deleteSC) + return err != nil + }, timeout, interval).Should(BeTrue()) + }) +}) diff --git a/e2e/sliceqosconfig_test.go b/e2e/sliceqosconfig_test.go new file mode 100644 index 000000000..71bd3c298 --- /dev/null +++ b/e2e/sliceqosconfig_test.go @@ -0,0 +1,131 @@ +package e2e + +import ( + "time" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var _ = Describe("SliceQoSConfig E2E Tests", func() { + const ( + testNamespace = "default" + qosName = "profile1" + timeout = time.Second * 10 + interval = time.Millisecond * 250 + ) + + AfterEach(func() { + By("Cleaning up SliceQoSConfig after each test") + qos := &controllerv1alpha1.SliceQoSConfig{} + err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), qos) + if err == nil { + Expect(k8sClient.Delete(ctx, qos)).To(Succeed()) + } + }) + + It("should create a SliceQoSConfig successfully", func() { + By("Creating a new SliceQoSConfig") + qos := &controllerv1alpha1.SliceQoSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: qosName, + Namespace: testNamespace, + }, + Spec: controllerv1alpha1.SliceQoSConfigSpec{ + QueueType: "HTB", + Priority: 1, + TcType: "BANDWIDTH_CONTROL", + BandwidthCeilingKbps: 5120, + BandwidthGuaranteedKbps: 2562, + DscpClass: "AF11", + }, + } + Expect(k8sClient.Create(ctx, qos)).To(Succeed()) + + By("Verifying the SliceQoSConfig exists") + Eventually(func() bool { + fetched := &controllerv1alpha1.SliceQoSConfig{} + err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), fetched) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + + It("should update an existing SliceQoSConfig", func() { + By("Ensuring any previous SliceQoSConfig is fully deleted before re-creating") + Eventually(func() bool { + err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), &controllerv1alpha1.SliceQoSConfig{}) + return err != nil + }, timeout*3, interval).Should(BeTrue()) + + By("Creating the SliceQoSConfig first") + qos := &controllerv1alpha1.SliceQoSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: qosName, + Namespace: testNamespace, + }, + Spec: controllerv1alpha1.SliceQoSConfigSpec{ + QueueType: "HTB", + Priority: 1, + TcType: "BANDWIDTH_CONTROL", + BandwidthCeilingKbps: 5120, + BandwidthGuaranteedKbps: 2562, + DscpClass: "AF11", + }, + } + 
Expect(k8sClient.Create(ctx, qos)).To(Succeed()) + + By("Updating the Priority field") + Eventually(func() error { + fetched := &controllerv1alpha1.SliceQoSConfig{} + if err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), fetched); err != nil { + return err + } + fetched.Spec.Priority = 5 + return k8sClient.Update(ctx, fetched) + }, timeout, interval).Should(Succeed()) + + By("Verifying the updated Priority field") + Eventually(func() int { + fetched := &controllerv1alpha1.SliceQoSConfig{} + _ = k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), fetched) + return fetched.Spec.Priority + }, timeout, interval).Should(Equal(5)) + }) + + It("should delete an existing SliceQoSConfig", func() { + By("Ensuring any previous SliceQoSConfig is fully deleted before re-creating") + Eventually(func() bool { + err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), &controllerv1alpha1.SliceQoSConfig{}) + return err != nil + }, timeout*3, interval).Should(BeTrue()) + + By("Creating the SliceQoSConfig first") + qos := &controllerv1alpha1.SliceQoSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: qosName, + Namespace: testNamespace, + }, + Spec: controllerv1alpha1.SliceQoSConfigSpec{ + QueueType: "HTB", + Priority: 1, + TcType: "BANDWIDTH_CONTROL", + BandwidthCeilingKbps: 5120, + BandwidthGuaranteedKbps: 2562, + DscpClass: "AF11", + }, + } + Expect(k8sClient.Create(ctx, qos)).To(Succeed()) + + By("Deleting the SliceQoSConfig") + Expect(k8sClient.Delete(ctx, qos)).To(Succeed()) + + By("Verifying the SliceQoSConfig is deleted") + Eventually(func() bool { + err := k8sClient.Get(ctx, ObjectKey(testNamespace, qosName), qos) + return err != nil + }, timeout*3, interval).Should(BeTrue()) + }) + +}) diff --git a/e2e/suite_test.go b/e2e/suite_test.go new file mode 100644 index 000000000..3c0d0aa10 --- /dev/null +++ b/e2e/suite_test.go @@ -0,0 +1,76 @@ +package e2e + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + + controllerV1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" +) + +// Global variables for E2E tests +var ( + k8sClient client.Client + ctx context.Context + scheme = runtime.NewScheme() +) + +const ( + controlPlaneNamespace = "system" + + timeout = time.Second * 30 + interval = time.Millisecond * 500 +) + +func ObjectKey(namespace, name string) client.ObjectKey { + return client.ObjectKey{Namespace: namespace, Name: name} +} + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "KubeSlice E2E Suite") +} + +var _ = BeforeSuite(func() { + By("Bootstrapping test environment") + + // Register Kubernetes core + custom schemes + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(controllerV1alpha1.AddToScheme(scheme)) + + // Load kubeconfig + cfg, err := config.GetConfig() + Expect(err).NotTo(HaveOccurred()) + + // Create k8s client + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + + ctx = context.Background() + + namespaces := []string{"kubeslice-controller", "system"} + for _, ns := range namespaces { + nsObj := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}} + err := k8sClient.Create(ctx, nsObj) + if err != nil && !apierrors.IsAlreadyExists(err) { + Fail("Failed to create namespace " + ns + ": " + err.Error()) + } + } +}) + +var _ = AfterSuite(func() { + By("Tearing down test environment (if needed)") +}) diff --git a/e2e/vpnkeyrotation_test.go b/e2e/vpnkeyrotation_test.go new file mode 100644 index 000000000..d841d2333 --- /dev/null +++ 
b/e2e/vpnkeyrotation_test.go @@ -0,0 +1,132 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("VpnKeyRotation Controller E2E Tests", func() { + var ( + ctx context.Context + namespace string + sliceName string + vpnCR *controllerv1alpha1.VpnKeyRotation + key types.NamespacedName + ) + + BeforeEach(func() { + ctx = context.Background() + namespace = "kubeslice-controller" + sliceName = "test-slice-" + fmt.Sprintf("%d", time.Now().UnixNano()) + + vpnCR = &controllerv1alpha1.VpnKeyRotation{ + ObjectMeta: metav1.ObjectMeta{ + Name: sliceName, + Namespace: namespace, + }, + Spec: controllerv1alpha1.VpnKeyRotationSpec{ + SliceName: sliceName, + RotationInterval: 1, + Clusters: []string{"cluster1", "cluster2"}, + }, + } + + key = types.NamespacedName{Name: vpnCR.Name, Namespace: vpnCR.Namespace} + + // Ensure CR exists before each test + By("creating the VpnKeyRotation resource before each test") + Expect(k8sClient.Create(ctx, vpnCR)).Should(Succeed()) + + Eventually(func() error { + f := &controllerv1alpha1.VpnKeyRotation{} + return k8sClient.Get(ctx, key, f) + }, time.Second*10, time.Millisecond*250).Should(Succeed()) + }) + + AfterEach(func() { + // Cleanup after each test + _ = k8sClient.Delete(ctx, vpnCR) + }) + + It("should create a VpnKeyRotation CR successfully", func() { + fetched := &controllerv1alpha1.VpnKeyRotation{} + By("fetching the created resource") + Expect(k8sClient.Get(ctx, key, fetched)).Should(Succeed()) + + Expect(fetched.Spec.SliceName).To(Equal(sliceName)) + Expect(fetched.Spec.RotationInterval).To(Equal(1)) + Expect(fetched.Spec.Clusters).To(ContainElements("cluster1", "cluster2")) + }) + + It("should update ClusterGatewayMapping after reconciliation", func() { + fetched := 
&controllerv1alpha1.VpnKeyRotation{} + Expect(k8sClient.Get(ctx, key, fetched)).Should(Succeed()) + + By("ensuring ClusterGatewayMapping is initially empty") + Expect(fetched.Spec.ClusterGatewayMapping).To(BeEmpty()) + + By("simulating reconciliation by updating mapping") + fetched.Spec.ClusterGatewayMapping = map[string][]string{ + "cluster1": {"gw1", "gw2"}, + "cluster2": {"gw3"}, + } + Expect(k8sClient.Update(ctx, fetched)).Should(Succeed()) + + Eventually(func() map[string][]string { + _ = k8sClient.Get(ctx, key, fetched) + return fetched.Spec.ClusterGatewayMapping + }, time.Second*10, time.Millisecond*250).Should(HaveKeyWithValue("cluster1", []string{"gw1", "gw2"})) + Expect(fetched.Spec.ClusterGatewayMapping).To(HaveKeyWithValue("cluster2", []string{"gw3"})) + }) + + It("should update CertificateCreationTime and CertificateExpiryTime after rotation", func() { + fetched := &controllerv1alpha1.VpnKeyRotation{} + Expect(k8sClient.Get(ctx, key, fetched)).Should(Succeed()) + + now := metav1.Now() + expiry := metav1.NewTime(now.Add(24 * time.Hour)) + + fetched.Spec.CertificateCreationTime = &now + fetched.Spec.CertificateExpiryTime = &expiry + Expect(k8sClient.Update(ctx, fetched)).Should(Succeed()) + + Eventually(func() bool { + _ = k8sClient.Get(ctx, key, fetched) + return fetched.Spec.CertificateCreationTime != nil && fetched.Spec.CertificateExpiryTime != nil + }, time.Second*10, time.Millisecond*250).Should(BeTrue()) + + Expect(fetched.Spec.CertificateExpiryTime.Sub(fetched.Spec.CertificateCreationTime.Time)). 
+ To(BeNumerically("~", 24*time.Hour, time.Minute)) + }) + + It("should increment RotationCount after certificate expiry", func() { + fetched := &controllerv1alpha1.VpnKeyRotation{} + Expect(k8sClient.Get(ctx, key, fetched)).Should(Succeed()) + + expired := metav1.NewTime(metav1.Now().Add(-time.Hour)) + fetched.Spec.CertificateExpiryTime = &expired + fetched.Spec.RotationCount = 1 + Expect(k8sClient.Update(ctx, fetched)).Should(Succeed()) + + Eventually(func() int { + _ = k8sClient.Get(ctx, key, fetched) + if metav1.Now().After(fetched.Spec.CertificateExpiryTime.Time) { + fetched.Spec.RotationCount++ + now := metav1.Now() + expiry := metav1.NewTime(now.Add(24 * time.Hour)) + fetched.Spec.CertificateCreationTime = &now + fetched.Spec.CertificateExpiryTime = &expiry + _ = k8sClient.Update(ctx, fetched) + } + return fetched.Spec.RotationCount + }, time.Second*10, time.Millisecond*250).Should(Equal(2)) + }) +}) diff --git a/go.mod b/go.mod index 10664a66c..24ab93516 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubeslice/kubeslice-controller -go 1.24.0 +go 1.24 require ( bou.ke/monkey v1.0.2