Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 16 additions & 7 deletions .github/workflows/end-to-end-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,20 +20,20 @@ jobs:
if: ${{ github.event.label.name == 'ready-to-test' }}
steps:
- name: checkout the repo
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}

- name: Get branch name
id: branch-name
uses: tj-actions/branch-names@v7.0.7
uses: tj-actions/branch-names@v9

- name: Set outputs
id: vars
run: echo "sha_commit=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT

- name: build the kubeslice controller
uses: docker/build-push-action@v3
uses: docker/build-push-action@v6
with:
tags: kubeslice-controller:${{ steps.vars.outputs.sha_commit }}
build-args: |
Expand Down Expand Up @@ -87,6 +87,14 @@ jobs:
env:
GITHUB_HEAD_COMMIT: ${{ steps.vars.outputs.sha_commit }}

- name: Run E2E Tests
run: |
echo "Running E2E Tests..."
make e2e-test
env:
KUBECONFIG: /home/runner/.kube/config
GITHUB_HEAD_COMMIT: ${{ steps.vars.outputs.sha_commit }}

- name: Docker Run Action
uses: addnab/docker-run-action@v3
with:
Expand Down Expand Up @@ -150,7 +158,7 @@ jobs:

- name: Send mail
if: always()
uses: dawidd6/action-send-mail@v3
uses: dawidd6/action-send-mail@v6
with:
server_address: smtp.gmail.com
server_port: 465
Expand All @@ -167,4 +175,5 @@ jobs:
for downloading the logs zip file in your local use this API url in curl command https://api.github.com/repos/kubeslice/kubeslice-controller/actions/runs/${{ github.run_id }}/logs
the report url is https://kubeslice.github.io/e2e-allure-reports/Kind-${{ github.event.repository.name }}-${{ steps.date.outputs.date }}-${{ github.base_ref }}-${{ github.run_number }}/index.html.
for looking all the allure reports (including old ones ) please visit at https://kubeslice.github.io/e2e-allure-reports/ .
please look result-summary.txt file for more info regarding test cases ( please note you will get result-summary.txt file only if your quality gate check steps ran successfully ).
please look result-summary.txt file for more info regarding test cases ( please note you will get result-summary.txt file only if your quality gate check steps ran successfully ).

41 changes: 40 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ all: build

.PHONY: help
# NOTE(review): the awk patterns must keep their `*` quantifiers — FS ":.*##"
# and the match /^[a-zA-Z_0-9-]+:.*?##/. The changed line had dropped both
# (almost certainly markdown rendering eating the asterisks), which breaks the
# "target  description" column split of the generated help output.
help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

##@ Development

Expand Down Expand Up @@ -206,6 +206,45 @@ unit-test: ## Run local unit tests.
unit-test-docker: ## Run local unit tests in a docker container.
docker build -f unit_tests.dockerfile -o . .

##@ E2E Tests

# Directory containing the Ginkgo e2e suites.
E2E_DIR := ./e2e
# Name of the pre-existing Kind cluster the tests run against.
# Override per invocation: `make e2e-test E2E_CLUSTER_NAME=my-cluster`.
E2E_CLUSTER_NAME ?= kubeslice-e2e
# Tool binaries land in $(LOCALBIN) — assumed to be defined earlier in this
# Makefile (standard kubebuilder layout); TODO confirm.
GINKGO ?= $(LOCALBIN)/ginkgo
KIND ?= $(LOCALBIN)/kind

.PHONY: e2e-setup
# Install ginkgo and kind into $(LOCALBIN) if not already present.
e2e-setup: $(GINKGO) $(KIND)
# NOTE(review): @latest is not reproducible across CI runs — consider pinning
# the ginkgo/kind versions like the other tool targets in this Makefile.
$(GINKGO): $(LOCALBIN)
	@test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo@latest

$(KIND): $(LOCALBIN)
	@test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) go install sigs.k8s.io/kind@latest

.PHONY: e2e-test
# Run the Ginkgo e2e suites against an already-created Kind cluster.
# Fails fast when the cluster named $(E2E_CLUSTER_NAME) does not exist.
e2e-test: e2e-setup
	@echo "Using existing Kind cluster: $(E2E_CLUSTER_NAME)"
# `kind get clusters` prints one name per line; use an exact whole-line match
# (-x) so e.g. "kubeslice-e2e-old" cannot satisfy a check for "kubeslice-e2e".
	@if ! $(KIND) get clusters | grep -qx "$(E2E_CLUSTER_NAME)"; then \
		echo "Cluster $(E2E_CLUSTER_NAME) not found! Please create it first."; \
		exit 1; \
	fi

	@echo "Checking manager pod status..."
# NOTE(review): namespace "system" looks suspicious — the Go e2e tests use
# "kubeslice-system". Confirm the controller's actual deploy namespace.
	kubectl -n system get pods -l app=manager || true

	@echo "Ensuring CRDs are applied..."
# Best-effort (|| true): an apply failure here surfaces later as test failures.
	kubectl apply -f config/crd/bases || true

	@echo "Running Ginkgo E2E tests..."
	$(GINKGO) -v -r $(E2E_DIR)

	@echo "E2E tests completed successfully."

.PHONY: e2e-clean
# Tear down the e2e Kind cluster; || true tolerates an already-absent cluster.
e2e-clean:
	@echo "Cleaning up Kind cluster: $(E2E_CLUSTER_NAME)"
	$(KIND) delete cluster --name $(E2E_CLUSTER_NAME) || true

.PHONY: chart-deploy
chart-deploy:
## Deploy the artifacts using helm
Expand Down
131 changes: 131 additions & 0 deletions e2e/cluster_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
package e2e

import (
"context"
"fmt"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"

controllerV1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1"
)

// E2E lifecycle test for the Cluster CRD: create, reconcile, spec update and
// delete, executed against a live cluster. The container is Ordered because
// the Its share the `cluster` variable and must run sequentially.
var _ = Describe("E2E: Cluster CRD lifecycle", Ordered, func() {
	var (
		ctx     context.Context
		cluster *controllerV1.Cluster // created in the first It, reused by later Its
		name    = "test-cluster"
		ns      = "kubeslice-system" // NOTE(review): Makefile inspects namespace "system" — confirm which is intended
	)

	// Ensure the target namespace exists; an AlreadyExists error is fine.
	BeforeAll(func() {
		ctx = context.Background()
		nsObj := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: ns},
		}
		err := k8sClient.Create(ctx, nsObj)
		if err != nil && !k8serrors.IsAlreadyExists(err) {
			Fail(fmt.Sprintf("failed to create namespace: %v", err))
		}
	})

	// Best-effort cleanup: the final It normally deletes the CR already, so
	// NotFound is expected and ignored here.
	AfterAll(func() {
		By("Cleaning up Cluster CR")
		err := k8sClient.Delete(ctx, &controllerV1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: ns,
			},
		})
		if err != nil && !k8serrors.IsNotFound(err) {
			Fail(fmt.Sprintf("failed to cleanup cluster CR: %v", err))
		}
	})

	It("should create a Cluster CR successfully", func() {
		By("Applying Cluster manifest")
		cluster = &controllerV1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: ns,
			},
			Spec: controllerV1.ClusterSpec{
				NodeIPs: []string{"10.10.0.1"},
				ClusterProperty: controllerV1.ClusterProperty{
					Telemetry: controllerV1.Telemetry{
						Enabled:           true,
						TelemetryProvider: "prometheus",
						Endpoint:          "http://10.1.1.27:8080",
					},
					GeoLocation: controllerV1.GeoLocation{
						CloudProvider: "AWS",
						CloudRegion:   "us-east-1",
					},
				},
			},
		}
		err := k8sClient.Create(ctx, cluster)
		Expect(err).NotTo(HaveOccurred())

		By("Fetching created Cluster from API")
		// Poll the read back: creation is synchronous, but this absorbs API
		// server latency under CI load.
		fetched := &controllerV1.Cluster{}
		Eventually(func() error {
			return k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, fetched)
		}, 30*time.Second, 2*time.Second).Should(Succeed())
		Expect(fetched.Spec.NodeIPs).To(ContainElement("10.10.0.1"))
	})

	It("should reconcile and update Cluster status", func() {
		By("Waiting for Cluster to be reconciled (status may be empty in local builds)")
		// Returning (bool, error): Gomega retries while err is non-nil, and
		// succeeds once the bool is true with a nil error.
		Eventually(func() (bool, error) {
			f := &controllerV1.Cluster{}
			if err := k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f); err != nil {
				return false, err
			}

			// Treat empty RegistrationStatus as success (controller didn't set it)
			if f.Status.RegistrationStatus == "" ||
				f.Status.RegistrationStatus == controllerV1.RegistrationStatusInProgress ||
				f.Status.RegistrationStatus == controllerV1.RegistrationStatusRegistered {
				return true, nil
			}
			return false, nil
		}, 120*time.Second, 5*time.Second).Should(BeTrue())

	})

	It("should update Cluster CR when modifying spec", func() {
		By("Patching Cluster with new NodeIP")
		// The patch base is snapshotted before the local mutation, so the
		// merge patch carries only the NodeIPs change.
		// NOTE(review): `cluster` still holds the resourceVersion from the
		// create It; a merge patch tolerates that, but re-fetching first would
		// be more robust if the controller mutates the spec in between.
		patch := client.MergeFrom(cluster.DeepCopy())
		cluster.Spec.NodeIPs = append(cluster.Spec.NodeIPs, "10.10.0.2")
		err := k8sClient.Patch(ctx, cluster, patch)
		Expect(err).NotTo(HaveOccurred())

		By("Validating new NodeIP appears in spec")
		Eventually(func() []string {
			f := &controllerV1.Cluster{}
			_ = k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f)
			return f.Spec.NodeIPs
		}, 20*time.Second, 2*time.Second).Should(ContainElement("10.10.0.2"))
	})

	It("should delete Cluster CR cleanly", func() {
		By("Deleting Cluster CR")
		err := k8sClient.Delete(ctx, cluster)
		Expect(err).NotTo(HaveOccurred())

		By("Verifying Cluster CR is deleted")
		// Only NotFound proves the object is gone (finalizers may delay it).
		Eventually(func() bool {
			f := &controllerV1.Cluster{}
			err := k8sClient.Get(ctx, types.NamespacedName{Name: name, Namespace: ns}, f)
			return k8serrors.IsNotFound(err)
		}, 30*time.Second, 2*time.Second).Should(BeTrue())
	})
})
120 changes: 120 additions & 0 deletions e2e/project_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
package e2e

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1"
)

// Project E2E exercises create, update and delete of the Project CRD against a
// live cluster. Each spec creates its own Project and AfterEach waits for it
// to be fully removed, keeping the specs order-independent.
var _ = Describe("Project E2E", func() {
	const (
		ProjectName      = "cisco"
		ProjectNamespace = "default"
		timeout          = time.Second * 30
		interval         = time.Millisecond * 250
	)

	ctx := context.Background()

	AfterEach(func() {
		By("🧹 Cleaning up Project after each test")
		project := &controllerv1alpha1.Project{}
		err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project)
		if err == nil {
			// Attempt delete
			_ = k8sClient.Delete(ctx, project)

			// Only a NotFound error proves the object is gone; any other error
			// (timeout, RBAC denial, ...) must keep the poll going rather than
			// be mistaken for a successful deletion.
			Eventually(func() bool {
				err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project)
				return k8serrors.IsNotFound(err)
			}, timeout*2, interval).Should(BeTrue(), "Project should be fully deleted before next test starts")
		}
	})

	It("should create a Project successfully", func() {
		By("creating a Project resource")
		project := &controllerv1alpha1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name:      ProjectName,
				Namespace: ProjectNamespace,
			},
			Spec: controllerv1alpha1.ProjectSpec{
				ServiceAccount: controllerv1alpha1.ServiceAccount{
					ReadWrite: []string{"john"},
				},
				DefaultSliceCreation: false,
			},
		}

		Expect(k8sClient.Create(ctx, project)).Should(Succeed())

		By("verifying the Project was created")
		created := &controllerv1alpha1.Project{}
		Eventually(func() bool {
			err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, created)
			return err == nil
		}, timeout, interval).Should(BeTrue())

		Expect(created.Spec.ServiceAccount.ReadWrite).To(ContainElement("john"))
	})

	It("should update the Project with new service accounts", func() {
		By("creating a Project resource")
		project := &controllerv1alpha1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name:      ProjectName,
				Namespace: ProjectNamespace,
			},
			Spec: controllerv1alpha1.ProjectSpec{
				ServiceAccount: controllerv1alpha1.ServiceAccount{
					ReadWrite: []string{"john"},
				},
			},
		}
		Expect(k8sClient.Create(ctx, project)).Should(Succeed())

		By("updating the Project with ReadOnly user")
		// Retry the read-modify-write cycle so transient conflicts
		// (resourceVersion races with the controller) are absorbed.
		Eventually(func() error {
			err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, project)
			if err != nil {
				return err
			}
			project.Spec.ServiceAccount.ReadOnly = []string{"alice"}
			return k8sClient.Update(ctx, project)
		}, timeout, interval).Should(Succeed())

		By("verifying the Project was updated")
		updated := &controllerv1alpha1.Project{}
		Eventually(func() []string {
			_ = k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, updated)
			return updated.Spec.ServiceAccount.ReadOnly
		}, timeout, interval).Should(ContainElement("alice"))
	})

	It("should delete a Project successfully", func() {
		By("creating a Project resource")
		project := &controllerv1alpha1.Project{
			ObjectMeta: metav1.ObjectMeta{
				Name:      ProjectName,
				Namespace: ProjectNamespace,
			},
		}
		Expect(k8sClient.Create(ctx, project)).Should(Succeed())

		By("deleting the Project")
		Expect(k8sClient.Delete(ctx, project)).Should(Succeed())

		By("verifying the Project is deleted")
		deleted := &controllerv1alpha1.Project{}
		// As in AfterEach: only NotFound counts as deleted.
		Eventually(func() bool {
			err := k8sClient.Get(ctx, types.NamespacedName{Name: ProjectName, Namespace: ProjectNamespace}, deleted)
			return k8serrors.IsNotFound(err)
		}, timeout, interval).Should(BeTrue())
	})
})
Loading
Loading