Skip to content

Commit d0876a7

Browse files
committed
[BUILD] Make e2e tests output less verbose and fix flaky NodePort unit-tests
1 parent 267e1d6 commit d0876a7

File tree

8 files changed

+125
-162
lines changed

8 files changed

+125
-162
lines changed

Makefile

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,16 @@ all: test manager ## Run 'test' and 'manager' targets.
5353
.PHONY: check
5454
check: test lint ## Run tests and linters
5555

56+
.PHONY: clean
57+
clean: ## Clean build artifacts and test binaries
58+
@echo "Cleaning build artifacts..."
59+
@if [ -d "bin" ]; then \
60+
chmod -R u+w bin/ 2>/dev/null || true; \
61+
rm -rf bin/; \
62+
fi
63+
@rm -f cover.out
64+
@rm -f manager_image_patch.yaml
65+
5666
bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} ## Symlink golangci-lint-<version> into versionless golangci-lint.
5767
@ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
5868
bin/golangci-lint-${GOLANGCI_VERSION}: ## Download versioned golangci-lint.
@@ -95,7 +105,6 @@ install-kustomize: ## Install kustomize.
95105
test: generate fmt vet bin/setup-envtest
96106
cd api && go test ./...
97107
KUBEBUILDER_ASSETS=$$($(BIN_DIR)/setup-envtest --print path --bin-dir $(BIN_DIR) use $(ENVTEST_K8S_VERSION)) \
98-
GINKGO_FLAKE_ATTEMPTS=3 \
99108
go test ./... \
100109
-coverprofile cover.out \
101110
-v \

controllers/tests/common_test.go

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,10 @@ func createMinimalKafkaClusterCR(name, namespace string) *banzaicloudv1beta1.Kaf
132132
}
133133

134134
func waitForClusterRunningState(ctx context.Context, kafkaCluster *banzaicloudv1beta1.KafkaCluster, namespace string) {
135+
waitForClusterRunningStateWithTimeout(ctx, kafkaCluster, namespace, 240*time.Second)
136+
}
137+
138+
func waitForClusterRunningStateWithTimeout(ctx context.Context, kafkaCluster *banzaicloudv1beta1.KafkaCluster, namespace string, timeout time.Duration) {
135139
ctx, cancel := context.WithCancel(ctx)
136140
defer cancel()
137141

@@ -161,7 +165,7 @@ func waitForClusterRunningState(ctx context.Context, kafkaCluster *banzaicloudv1
161165
}
162166
}
163167
}()
164-
Eventually(ch, 240*time.Second, 50*time.Millisecond).Should(Receive())
168+
Eventually(ch, timeout, 50*time.Millisecond).Should(Receive())
165169
}
166170

167171
func getMockedKafkaClientForCluster(kafkaCluster *banzaicloudv1beta1.KafkaCluster) (kafkaclient.KafkaClient, func()) {

controllers/tests/kafkacluster_controller_externalnodeport_test.go

Lines changed: 20 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ import (
2727
"k8s.io/apimachinery/pkg/api/resource"
2828
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2929
"k8s.io/apimachinery/pkg/types"
30-
"k8s.io/apimachinery/pkg/util/intstr"
3130
"sigs.k8s.io/controller-runtime/pkg/client"
3231

3332
"github.com/banzaicloud/koperator/api/v1beta1"
@@ -52,9 +51,6 @@ var (
5251
}
5352
)
5453

55-
var allocatedNodePorts []int32
56-
var safePort int32
57-
5854
var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
5955
var (
6056
count uint64 = 0
@@ -89,7 +85,7 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
8985
err = k8sClient.Create(ctx, kafkaCluster)
9086
Expect(err).NotTo(HaveOccurred())
9187

92-
waitForClusterRunningState(ctx, kafkaCluster, namespace)
88+
waitForClusterRunningStateWithTimeout(ctx, kafkaCluster, namespace, 120*time.Second)
9389

9490
})
9591

@@ -121,13 +117,6 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
121117

122118
When("hostnameOverride is configured with externalStartingPort 0", func() {
123119
BeforeEach(func() {
124-
allocatedNodePorts = nil
125-
// Pre-allocate ports even when using auto-assignment to prevent conflicts
126-
// Allocate 3 consecutive ports for the 3 brokers to avoid race conditions
127-
safePort = GetNodePort(3)
128-
for i := int32(0); i < 3; i++ {
129-
allocatedNodePorts = append(allocatedNodePorts, safePort+i)
130-
}
131120
kafkaCluster.Spec.ListenersConfig.ExternalListeners = []v1beta1.ExternalListenerConfig{
132121
{
133122
CommonListenerSpec: v1beta1.CommonListenerSpec{
@@ -204,31 +193,18 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
204193
},
205194
}))
206195
})
207-
AfterEach(func() {
208-
for _, port := range allocatedNodePorts {
209-
ReleaseNodePort(port)
210-
}
211-
allocatedNodePorts = nil
212-
})
213196
})
214197

215198
When("NodePortExternalIP is configured", func() {
216199
BeforeEach(func() {
217-
allocatedNodePorts = nil
218-
safePort = GetNodePort(3)
219-
// Allocate all 3 ports for the 3 brokers to avoid conflicts
220-
for i := int32(0); i < 3; i++ {
221-
allocatedNodePorts = append(allocatedNodePorts, safePort+i)
222-
}
223-
// update the external listener config with a nodeport listener
224200
kafkaCluster.Spec.ListenersConfig.ExternalListeners = []v1beta1.ExternalListenerConfig{
225201
{
226202
CommonListenerSpec: v1beta1.CommonListenerSpec{
227203
Name: "test",
228204
ContainerPort: 9733,
229205
Type: "plaintext",
230206
},
231-
ExternalStartingPort: safePort,
207+
ExternalStartingPort: 0,
232208
AccessMethod: corev1.ServiceTypeNodePort,
233209
},
234210
}
@@ -268,13 +244,6 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
268244
}
269245
})
270246

271-
AfterEach(func() {
272-
for _, port := range allocatedNodePorts {
273-
ReleaseNodePort(port)
274-
}
275-
allocatedNodePorts = nil
276-
})
277-
278247
It("reconciles the service successfully", func(ctx SpecContext) {
279248
var svc corev1.Service
280249
svcName := fmt.Sprintf("%s-0-test", kafkaClusterCRName)
@@ -302,13 +271,8 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
302271
Expect(svc.Spec.Ports[0].Port).To(BeEquivalentTo(9733))
303272
Expect(svc.Spec.Ports[0].TargetPort.IntVal).To(BeEquivalentTo(9733))
304273

305-
Expect(svc.Spec.Ports).To(ConsistOf(corev1.ServicePort{
306-
Name: "broker-0",
307-
Protocol: corev1.ProtocolTCP,
308-
Port: 9733,
309-
TargetPort: intstr.FromInt(9733),
310-
NodePort: safePort,
311-
}))
274+
Expect(svc.Spec.Ports).NotTo(BeEmpty())
275+
Expect(svc.Spec.Ports[0].NodePort).To(BeNumerically(">", 0))
312276

313277
// check status
314278
err := k8sClient.Get(ctx, types.NamespacedName{
@@ -360,20 +324,14 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
360324

361325
When("hostnameOverride is configured", func() {
362326
BeforeEach(func() {
363-
allocatedNodePorts = nil
364-
safePort = GetNodePort(3)
365-
// Allocate all 3 ports for the 3 brokers to avoid conflicts
366-
for i := int32(0); i < 3; i++ {
367-
allocatedNodePorts = append(allocatedNodePorts, safePort+i)
368-
}
369327
kafkaCluster.Spec.ListenersConfig.ExternalListeners = []v1beta1.ExternalListenerConfig{
370328
{
371329
CommonListenerSpec: v1beta1.CommonListenerSpec{
372330
Name: "test",
373331
ContainerPort: 9733,
374332
Type: "plaintext",
375333
},
376-
ExternalStartingPort: safePort,
334+
ExternalStartingPort: 0,
377335
IngressServiceSettings: v1beta1.IngressServiceSettings{
378336
HostnameOverride: ".external.nodeport.com",
379337
},
@@ -389,7 +347,17 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
389347
}, kafkaCluster)
390348
Expect(err).NotTo(HaveOccurred())
391349

392-
expectedPort := safePort
350+
// Get the actual allocated ports from Services
351+
assignedNodePortPerBroker := make(map[int32]int32, len(kafkaCluster.Spec.Brokers))
352+
for _, broker := range kafkaCluster.Spec.Brokers {
353+
var svc corev1.Service
354+
err := k8sClient.Get(ctx, types.NamespacedName{
355+
Name: fmt.Sprintf(kafka.NodePortServiceTemplate, kafkaCluster.GetName(), broker.Id, "test"),
356+
Namespace: kafkaCluster.GetNamespace(),
357+
}, &svc)
358+
Expect(err).NotTo(HaveOccurred())
359+
assignedNodePortPerBroker[broker.Id] = svc.Spec.Ports[0].NodePort
360+
}
393361

394362
Expect(kafkaCluster.Status.ListenerStatuses).To(Equal(v1beta1.ListenerStatuses{
395363
InternalListeners: map[string]v1beta1.ListenerStatusList{
@@ -416,26 +384,21 @@ var _ = Describe("KafkaClusterNodeportExternalAccess", Ordered, Serial, func() {
416384
"test": {
417385
{
418386
Name: "broker-0",
419-
Address: fmt.Sprintf("%s-0-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, expectedPort),
387+
Address: fmt.Sprintf("%s-0-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, assignedNodePortPerBroker[0]),
420388
},
421389
{
422390
Name: "broker-1",
423-
Address: fmt.Sprintf("%s-1-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, expectedPort+1),
391+
Address: fmt.Sprintf("%s-1-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, assignedNodePortPerBroker[1]),
424392
},
425393
{
426394
Name: "broker-2",
427-
Address: fmt.Sprintf("%s-2-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, expectedPort+2),
395+
Address: fmt.Sprintf("%s-2-test.kafka-nodeport-%d.external.nodeport.com:%d", kafkaCluster.Name, count, assignedNodePortPerBroker[2]),
428396
},
429397
},
430398
},
431399
}))
432400
})
433-
AfterEach(func() {
434-
for _, port := range allocatedNodePorts {
435-
ReleaseNodePort(port)
436-
}
437-
allocatedNodePorts = nil
438-
})
401+
439402
})
440403
})
441404

controllers/tests/suite_test.go

Lines changed: 3 additions & 88 deletions
Original file line numberDiff line numberDiff line change
@@ -31,18 +31,13 @@ limitations under the License.
3131
package tests
3232

3333
import (
34-
"context"
35-
"fmt"
36-
"math/rand/v2"
3734
"path/filepath"
38-
"sync"
3935
"testing"
4036
"time"
4137

4238
. "github.com/onsi/ginkgo/v2"
4339
. "github.com/onsi/gomega"
4440

45-
corev1 "k8s.io/api/core/v1"
4641
apiv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
4742
"k8s.io/apimachinery/pkg/runtime"
4843
"k8s.io/apimachinery/pkg/types"
@@ -107,6 +102,8 @@ var _ = BeforeSuite(func(ctx SpecContext) {
107102
}
108103
apiServer := testEnv.ControlPlane.GetAPIServer()
109104
apiServer.Configure().Set("service-cluster-ip-range", "10.0.0.0/16")
105+
// Give kube-apiserver more room for automatic NodePort allocation and avoid the small static band.
106+
apiServer.Configure().Set("service-node-port-range", "30128-65000")
110107

111108
var cfg *rest.Config
112109
var err error
@@ -252,90 +249,8 @@ var _ = BeforeSuite(func(ctx SpecContext) {
252249
})
253250

254251
var _ = AfterSuite(func() {
252+
255253
By("tearing down the test environment")
256254
err := testEnv.Stop()
257255
Expect(err).ToNot(HaveOccurred())
258256
})
259-
260-
var (
261-
nodePortMutex sync.Mutex
262-
nodePorts = make(map[int32]bool)
263-
)
264-
265-
func GetNodePort(portAmount int32) int32 {
266-
portAmount--
267-
nodePortMutex.Lock()
268-
defer nodePortMutex.Unlock()
269-
270-
const minPort, maxPort = 30000, 32767
271-
portRange := maxPort - minPort + 1
272-
273-
fmt.Println("GetNodePort: Looking for an available nodeport")
274-
275-
// Always refresh the nodePorts map from actual Kubernetes services
276-
if k8sClient != nil {
277-
var serviceList corev1.ServiceList
278-
if err := k8sClient.List(context.Background(), &serviceList); err == nil {
279-
fmt.Printf("GetNodePort: Found %d services to check for nodeports\n", len(serviceList.Items))
280-
// Clear the map first to get fresh state
281-
nodePorts = make(map[int32]bool)
282-
for _, service := range serviceList.Items {
283-
if service.Spec.Type == corev1.ServiceTypeNodePort {
284-
for _, port := range service.Spec.Ports {
285-
if port.NodePort > 0 {
286-
nodePorts[port.NodePort] = true
287-
fmt.Printf("GetNodePort: Found existing nodeport %d in service %s/%s\n",
288-
port.NodePort, service.Namespace, service.Name)
289-
}
290-
}
291-
}
292-
}
293-
} else {
294-
fmt.Printf("ERROR: Failed to list services: %v\n", err)
295-
}
296-
} else {
297-
fmt.Println("WARNING: k8sClient not initialized yet skipping Kubernetes service check")
298-
}
299-
300-
attempts := 0
301-
for attempts = 0; attempts < 200; attempts++ {
302-
port := minPort + rand.Int32N(int32(portRange))
303-
304-
// Avoid the problematic range around 32030 that often causes conflicts
305-
if port >= 32025 && port <= 32035 {
306-
continue
307-
}
308-
309-
// Ensure the port range doesn't cross into the high conflict zone
310-
if port+portAmount >= 32025 && port <= 32035 {
311-
continue
312-
}
313-
314-
allAvailable := true
315-
for i := int32(0); i <= portAmount; i++ {
316-
if nodePorts[port+i] {
317-
allAvailable = false
318-
break
319-
}
320-
}
321-
322-
if allAvailable {
323-
for i := int32(0); i <= portAmount; i++ {
324-
nodePorts[port+i] = true
325-
}
326-
fmt.Printf("GetNodePort: Successfully allocated NodePort range %d-%d after %d attempts\n",
327-
port, port+portAmount, attempts+1)
328-
return port
329-
}
330-
}
331-
332-
fmt.Printf("WARNING: No free NodePorts found after %d attempts, returning 0 for auto-assignment\n", attempts)
333-
return 0
334-
}
335-
336-
func ReleaseNodePort(port int32) {
337-
nodePortMutex.Lock()
338-
defer nodePortMutex.Unlock()
339-
delete(nodePorts, port)
340-
fmt.Printf("Released NodePort %d\n", port)
341-
}

tests/e2e/const.go

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -52,19 +52,19 @@ const (
5252
zookeeperClusterName = "zookeeper-server"
5353
managedByHelmLabelTemplate = "app.kubernetes.io/managed-by=Helm,app.kubernetes.io/instance=%s"
5454

55-
cruiseControlPodReadinessTimeout = 50 * time.Second
56-
kafkaClusterResourceReadinessTimeout = 60 * time.Second
57-
defaultDeletionTimeout = 20 * time.Second
58-
defaultPodReadinessWaitTime = 10 * time.Second
59-
defaultTopicCreationWaitTime = 10 * time.Second
60-
defaultUserCreationWaitTime = 10 * time.Second
55+
cruiseControlPodReadinessTimeout = 300 * time.Second // Increased for kind environments
56+
kafkaClusterResourceReadinessTimeout = 300 * time.Second // Increased for kind environments
57+
defaultDeletionTimeout = 60 * time.Second // Increased for kind environments
58+
defaultPodReadinessWaitTime = 180 * time.Second // Increased for kind environments
59+
defaultTopicCreationWaitTime = 60 * time.Second // Increased for kind environments
60+
defaultUserCreationWaitTime = 60 * time.Second // Increased for kind environments
6161
kafkaClusterCreateTimeout = 1800 * time.Second
6262
kafkaClusterResourceCleanupTimeout = 600 * time.Second
63-
kcatDeleetionTimeout = 40 * time.Second
64-
zookeeperClusterCreateTimeout = 4 * time.Minute
65-
zookeeperClusterResourceCleanupTimeout = 60 * time.Second
66-
externalConsumerTimeout = 5 * time.Second
67-
externalProducerTimeout = 5 * time.Second
63+
kcatDeleetionTimeout = 120 * time.Second // Increased for kind environments
64+
zookeeperClusterCreateTimeout = 10 * time.Minute // Increased for kind environments
65+
zookeeperClusterResourceCleanupTimeout = 180 * time.Second // Increased for kind environments
66+
externalConsumerTimeout = 120 * time.Second // Increased for kind environments
67+
externalProducerTimeout = 120 * time.Second // Increased for kind environments
6868

6969
zookeeperClusterReplicaCount = 1
7070

0 commit comments

Comments
 (0)