diff --git a/.golangci.yaml b/.golangci.yaml index b2e229d7..327ba672 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,5 +1,6 @@ +version: "2" linters: - disable-all: true + default: none enable: - containedctx - dogsled @@ -8,86 +9,86 @@ linters: - errcheck - errname - errorlint - - gci - gocognit - goconst - gocritic -# - godot - - gofmt - - gofumpt - goprintffuncname - gosec - - gosimple - govet - ineffassign + - intrange - lll -# TODO FIX THE FOLLOWING -# - misspell -# - nakedret -# - paralleltest + - misspell + - modernize + - nakedret + - nilerr + - predeclared - revive - sqlclosecheck - staticcheck -# - stylecheck - - typecheck - unconvert - unparam - - unused -# - whitespace - -linters-settings: - gocritic: - enabled-all: true - disabled-checks: - - commentFormatting - godot: - scope: all - gofumpt: - module-path: github.com/castai/cluster-controller - extra-rules: true - goconst: - min-len: 2 - min-occurrences: 5 - golint: - min-confidence: 0 - gomnd: - settings: - mnd: - # don't include the "operation" and "assign" - checks: [argument,case,condition,return] - govet: - # shadow is marked as experimental feature, skip it for now. 
- check-shadowing: false - settings: - printf: - funcs: - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf - lll: - line-length: 200 - maligned: - suggest-new: true - misspell: - locale: US - revive: - rules: - - name: redefines-builtin-id - disabled: true - - # Allow code like: - # Items: binpacking.Items{ - # { - # }, - # } - - name: nested-structs - disabled: true - gci: - sections: - - standard - - default - - prefix(github.com/castai/cluster-controller) -issues: - exclude-dirs: - - mock + - usestdlibvars + - usetesting + - wastedassign + - whitespace + settings: + goconst: + min-len: 2 + min-occurrences: 5 + gocritic: + disabled-checks: + - commentFormatting + godot: + scope: all + govet: + settings: + printf: + funcs: + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf + - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf + lll: + line-length: 200 + misspell: + locale: US + revive: + rules: + - name: redefines-builtin-id + disabled: true + - name: nested-structs + disabled: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - mock + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - gofumpt + settings: + gci: + sections: + - standard + - default + - prefix(github.com/castai/cluster-controller) + gofumpt: + module-path: github.com/castai/cluster-controller + extra-rules: true + exclusions: + generated: lax + paths: + - mock + - third_party$ + - builtin$ + - examples$ diff --git a/Makefile b/Makefile index 6d8b0c1c..1f1c88f0 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ TOOLS_DIR=bin 
ROOT_DIR=$(abspath .) TOOLS_GOBIN_DIR := $(abspath $(TOOLS_DIR)) -GOLANGCI_LINT_VER := v1.64.8 +GOLANGCI_LINT_VER := v2.7.2 GOLANGCI_LINT_BIN := golangci-lint GOLANGCI_LINT := $(TOOLS_GOBIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER) @@ -20,7 +20,7 @@ endif $(GOLANGCI_LINT): - GOBIN=$(TOOLS_GOBIN_DIR) $(GO_INSTALL) github.com/golangci/golangci-lint/cmd/golangci-lint $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) + GOBIN=$(TOOLS_GOBIN_DIR) $(GO_INSTALL) github.com/golangci/golangci-lint/v2/cmd/golangci-lint $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) ## build: Build the binary for the specified architecture and create a Docker image. Usually this means ARCH=amd64 should be set if running on an ARM machine. Use `go build .` for simple local build. build: diff --git a/cmd/controller/run.go b/cmd/controller/run.go index f4b53662..a0ec640c 100644 --- a/cmd/controller/run.go +++ b/cmd/controller/run.go @@ -295,14 +295,14 @@ func runWithLeaderElection( // This method is always called(even if it was not a leader): // - when controller shuts dow (for example because of SIGTERM) // - we actually lost leader - // So we need to check what whas reason of acutally stopping. + // So we need to check what was the reason of actually stopping. if err := ctx.Err(); err != nil { log.Infof("main context done, stopping controller: %v", err) return } log.Infof("leader lost: %s", id) // We don't need to exit here. - // Leader "on started leading" receive a context that gets cancelled when you're no longer the leader. + // Leader "on started leading" receives a context that gets canceled when you're no longer the leader. }, OnNewLeader: func(identity string) { // We're notified when new leader elected.
@@ -338,13 +338,14 @@ func (e *logContextError) Unwrap() error { return e.err } -func runningOnGKE(clientset *kubernetes.Clientset, cfg config.Config) (isGKE bool, err error) { +func runningOnGKE(clientset *kubernetes.Clientset, cfg config.Config) (bool, error) { // When running locally, there is no node. if cfg.SelfPod.Node == "" { return false, nil } - err = waitext.Retry(context.Background(), waitext.DefaultExponentialBackoff(), 3, func(ctx context.Context) (bool, error) { + var isGKE bool + err := waitext.Retry(context.Background(), waitext.DefaultExponentialBackoff(), 3, func(ctx context.Context) (bool, error) { node, err := clientset.CoreV1().Nodes().Get(ctx, cfg.SelfPod.Node, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return true, fmt.Errorf("getting node: %w", err) @@ -361,7 +362,7 @@ func runningOnGKE(clientset *kubernetes.Clientset, cfg config.Config) (isGKE boo }, func(err error) { }) - return + return isGKE, err } func saveMetadata(clusterID string, cfg config.Config, log *logrus.Entry) error { diff --git a/cmd/root.go b/cmd/root.go index 316a049b..f0a6e048 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "slices" "github.com/spf13/cobra" @@ -21,11 +22,8 @@ func Execute(ctx context.Context) { cmd := rootCmd.Commands() for _, a := range cmd { - for _, b := range os.Args[1:] { - if a.Name() == b { - cmdFound = true - break - } + if slices.Contains(os.Args[1:], a.Name()) { + cmdFound = true } } if !cmdFound { diff --git a/health/healthz.go b/health/healthz.go index 2360f20f..1ea5c9c2 100644 --- a/health/healthz.go +++ b/health/healthz.go @@ -46,7 +46,7 @@ func (h *HealthzProvider) Check(_ *http.Request) (err error) { if h.initStartedAt != nil { if time.Since(*h.initStartedAt) > h.cfg.StartTimeLimit { - return fmt.Errorf("there was no sucessful poll action since start of application %s", h.cfg.StartTimeLimit) + return fmt.Errorf("there was no successful poll action since start of application 
%s", h.cfg.StartTimeLimit) } return nil } diff --git a/internal/actions/actions.go b/internal/actions/actions.go index 761801a2..dab2d9f3 100644 --- a/internal/actions/actions.go +++ b/internal/actions/actions.go @@ -22,23 +22,23 @@ func NewDefaultActionHandlers( helmClient helm.Client, ) ActionHandlers { return ActionHandlers{ - reflect.TypeOf(&castai.ActionDeleteNode{}): NewDeleteNodeHandler(log, clientset), - reflect.TypeOf(&castai.ActionDrainNode{}): NewDrainNodeHandler(log, clientset, castNamespace), - reflect.TypeOf(&castai.ActionPatchNode{}): NewPatchNodeHandler(log, clientset), - reflect.TypeOf(&castai.ActionCreateEvent{}): NewCreateEventHandler(log, clientset), - reflect.TypeOf(&castai.ActionChartUpsert{}): NewChartUpsertHandler(log, helmClient), - reflect.TypeOf(&castai.ActionChartUninstall{}): NewChartUninstallHandler(log, helmClient), - reflect.TypeOf(&castai.ActionChartRollback{}): NewChartRollbackHandler(log, helmClient, k8sVersion), - reflect.TypeOf(&castai.ActionDisconnectCluster{}): NewDisconnectClusterHandler(log, clientset), - reflect.TypeOf(&castai.ActionCheckNodeDeleted{}): NewCheckNodeDeletedHandler(log, clientset), - reflect.TypeOf(&castai.ActionCheckNodeStatus{}): NewCheckNodeStatusHandler(log, clientset), - reflect.TypeOf(&castai.ActionEvictPod{}): NewEvictPodHandler(log, clientset), - reflect.TypeOf(&castai.ActionPatch{}): NewPatchHandler(log, dynamicClient), - reflect.TypeOf(&castai.ActionCreate{}): NewCreateHandler(log, dynamicClient), - reflect.TypeOf(&castai.ActionDelete{}): NewDeleteHandler(log, dynamicClient), + reflect.TypeFor[*castai.ActionDeleteNode](): NewDeleteNodeHandler(log, clientset), + reflect.TypeFor[*castai.ActionDrainNode](): NewDrainNodeHandler(log, clientset, castNamespace), + reflect.TypeFor[*castai.ActionPatchNode](): NewPatchNodeHandler(log, clientset), + reflect.TypeFor[*castai.ActionCreateEvent](): NewCreateEventHandler(log, clientset), + reflect.TypeFor[*castai.ActionChartUpsert](): NewChartUpsertHandler(log, 
helmClient), + reflect.TypeFor[*castai.ActionChartUninstall](): NewChartUninstallHandler(log, helmClient), + reflect.TypeFor[*castai.ActionChartRollback](): NewChartRollbackHandler(log, helmClient, k8sVersion), + reflect.TypeFor[*castai.ActionDisconnectCluster](): NewDisconnectClusterHandler(log, clientset), + reflect.TypeFor[*castai.ActionCheckNodeDeleted](): NewCheckNodeDeletedHandler(log, clientset), + reflect.TypeFor[*castai.ActionCheckNodeStatus](): NewCheckNodeStatusHandler(log, clientset), + reflect.TypeFor[*castai.ActionEvictPod](): NewEvictPodHandler(log, clientset), + reflect.TypeFor[*castai.ActionPatch](): NewPatchHandler(log, dynamicClient), + reflect.TypeFor[*castai.ActionCreate](): NewCreateHandler(log, dynamicClient), + reflect.TypeFor[*castai.ActionDelete](): NewDeleteHandler(log, dynamicClient), } } func (h ActionHandlers) Close() error { - return h[reflect.TypeOf(&castai.ActionCreateEvent{})].(*CreateEventHandler).Close() + return h[reflect.TypeFor[*castai.ActionCreateEvent]()].(*CreateEventHandler).Close() } diff --git a/internal/actions/check_node_deleted_test.go b/internal/actions/check_node_deleted_test.go index 0a6880d7..145aae87 100644 --- a/internal/actions/check_node_deleted_test.go +++ b/internal/actions/check_node_deleted_test.go @@ -192,7 +192,6 @@ func TestCheckNodeDeletedHandler_Handle(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clientSet := fake.NewClientset(tt.args.tuneFakeObjects...) 
diff --git a/internal/actions/check_node_status.go b/internal/actions/check_node_status.go index e423d3d2..e2653351 100644 --- a/internal/actions/check_node_status.go +++ b/internal/actions/check_node_status.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "reflect" + "slices" "time" "github.com/samber/lo" @@ -12,7 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/typed/core/v1" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" "github.com/castai/cluster-controller/internal/castai" "github.com/castai/cluster-controller/internal/waitext" @@ -63,7 +64,6 @@ func (h *CheckNodeStatusHandler) Handle(ctx context.Context, action *castai.Clus case castai.ActionCheckNodeStatus_DELETED: log.Info("checking node deleted") return h.checkNodeDeleted(ctx, log, req) - } return fmt.Errorf("unknown status to check provided node=%s status=%s", req.NodeName, req.NodeStatus) @@ -176,13 +176,7 @@ func (h *CheckNodeStatusHandler) isNodeReady(node *corev1.Node, castNodeID, prov } func containsUninitializedNodeTaint(taints []corev1.Taint) bool { - for _, taint := range taints { - // Some providers like AKS provider adds this taint even if node contains ready condition. - if taint == taintCloudProviderUninitialized { - return true - } - } - return false + return slices.Contains(taints, taintCloudProviderUninitialized) } var taintCloudProviderUninitialized = corev1.Taint{ diff --git a/internal/actions/check_node_status_test.go b/internal/actions/check_node_status_test.go index 331a49e6..9f84f2d8 100644 --- a/internal/actions/check_node_status_test.go +++ b/internal/actions/check_node_status_test.go @@ -185,7 +185,6 @@ func TestCheckNodeStatusHandler_Handle_Deleted(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clientSet := fake.NewClientset(tt.fields.tuneFakeObjects...) 
@@ -434,7 +433,6 @@ func TestCheckNodeStatusHandler_Handle_Ready(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clientSet := fake.NewClientset() diff --git a/internal/actions/create_event_handler_test.go b/internal/actions/create_event_handler_test.go index 63a5c669..1689112e 100644 --- a/internal/actions/create_event_handler_test.go +++ b/internal/actions/create_event_handler_test.go @@ -105,7 +105,7 @@ func TestCreateEvent(t *testing.T) { ctx := context.Background() wg := sync.WaitGroup{} wg.Add(test.actionCount) - for i := 0; i < test.actionCount; i++ { + for range test.actionCount { go func() { err := h.Handle(ctx, test.action) r.NoError(err) @@ -114,7 +114,7 @@ func TestCreateEvent(t *testing.T) { } wg.Wait() events := make([]string, 0, test.actionCount) - for i := 0; i < test.actionCount; i++ { + for i := range test.actionCount { select { case event := <-recorder.Events: events = append(events, event) @@ -123,7 +123,7 @@ func TestCreateEvent(t *testing.T) { continue } } - for i := 0; i < test.actionCount; i++ { + for i := range test.actionCount { r.Contains(events[i], test.expectedEvent.Reason) r.Contains(events[i], test.expectedEvent.Message) } @@ -153,7 +153,7 @@ func TestRandomNs(t *testing.T) { ctx := context.Background() wg := sync.WaitGroup{} wg.Add(actionCount) - for i := 0; i < actionCount; i++ { + for range actionCount { go func() { err := h.Handle(ctx, &castai.ClusterAction{ ID: uuid.New().String(), @@ -193,7 +193,7 @@ func TestRandomNs(t *testing.T) { broadCaster.Shutdown() } r.Len(events, actionCount) - for i := 0; i < actionCount; i++ { + for i := range actionCount { r.Contains(events[i], "Warning") r.Contains(events[i], "Oh common, you can do better.") } @@ -257,7 +257,6 @@ func TestCreateEventHandler_Handle(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() m := gomock.NewController(t) @@ -270,14 +269,12 @@ func 
TestCreateEventHandler_Handle(t *testing.T) { // defer handler.Close() actionCount := 10000 wg := sync.WaitGroup{} - for i := 0; i < actionCount; i++ { - wg.Add(1) - go func() { - defer wg.Done() + for range actionCount { + wg.Go(func() { if err := handler.Handle(context.Background(), tt.args.action); (err != nil) != tt.wantErr { t.Errorf("Handle() error = %v, wantErr %v", err, tt.wantErr) } - }() + }) } wg.Wait() }) diff --git a/internal/actions/create_handler.go b/internal/actions/create_handler.go index 13312f76..fb8c3d98 100644 --- a/internal/actions/create_handler.go +++ b/internal/actions/create_handler.go @@ -45,7 +45,7 @@ func (h *CreateHandler) Handle(ctx context.Context, action *castai.ClusterAction log := h.log.WithFields(logrus.Fields{ ActionIDLogField: action.ID, "action": action.GetType(), - "gvr": req.GroupVersionResource.String(), + "gvr": req.String(), "name": newObj.GetName(), }) diff --git a/internal/actions/create_handler_test.go b/internal/actions/create_handler_test.go index c8f23869..50e231c7 100644 --- a/internal/actions/create_handler_test.go +++ b/internal/actions/create_handler_test.go @@ -29,7 +29,7 @@ func Test_newCreateHandler(t *testing.T) { tests := map[string]struct { objs []runtime.Object action *castai.ClusterAction - convertFn func(i map[string]interface{}) client.Object + convertFn func(i map[string]any) client.Object err error want runtime.Object }{ @@ -59,7 +59,7 @@ func Test_newCreateHandler(t *testing.T) { }, }, want: newDeployment(), - convertFn: func(i map[string]interface{}) client.Object { + convertFn: func(i map[string]any) client.Object { out := &appsv1.Deployment{} _ = runtime.DefaultUnstructuredConverter.FromUnstructured(i, out) return out @@ -85,7 +85,7 @@ func Test_newCreateHandler(t *testing.T) { d.(*appsv1.Deployment).CreationTimestamp = now d.(*appsv1.Deployment).Labels = map[string]string{"changed": "true"} }), - convertFn: func(i map[string]interface{}) client.Object { + convertFn: func(i map[string]any) 
client.Object { out := &appsv1.Deployment{} _ = runtime.DefaultUnstructuredConverter.FromUnstructured(i, out) return out @@ -111,7 +111,7 @@ func Test_newCreateHandler(t *testing.T) { d.(*appsv1.Deployment).CreationTimestamp = now d.(*appsv1.Deployment).Finalizers = []string{"autoscaling.cast.ai/recommendation"} }), - convertFn: func(i map[string]interface{}) client.Object { + convertFn: func(i map[string]any) client.Object { out := &appsv1.Deployment{} _ = runtime.DefaultUnstructuredConverter.FromUnstructured(i, out) return out @@ -129,7 +129,7 @@ func Test_newCreateHandler(t *testing.T) { }, }, want: newNamespace(), - convertFn: func(i map[string]interface{}) client.Object { + convertFn: func(i map[string]any) client.Object { out := &v1.Namespace{} _ = runtime.DefaultUnstructuredConverter.FromUnstructured(i, out) return out @@ -138,7 +138,6 @@ func Test_newCreateHandler(t *testing.T) { } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { r := require.New(t) log := logrus.New() @@ -166,7 +165,7 @@ func Test_newCreateHandler(t *testing.T) { } } -func getObj(t *testing.T, obj runtime.Object) map[string]interface{} { +func getObj(t *testing.T, obj runtime.Object) map[string]any { t.Helper() unstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) if err != nil { diff --git a/internal/actions/csr/approve_csr_handler_test.go b/internal/actions/csr/approve_csr_handler_test.go index 38db9fb1..454e636f 100644 --- a/internal/actions/csr/approve_csr_handler_test.go +++ b/internal/actions/csr/approve_csr_handler_test.go @@ -97,12 +97,12 @@ func TestApproveCSRHandler(t *testing.T) { } client := fake.NewClientset(csrRes) // Return NotFound for all v1 resources. 
- client.PrependReactor("*", "*", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + client.PrependReactor("*", "*", func(action ktest.Action) (handled bool, resource runtime.Object, reactionErr error) { if action.GetResource().Version == "v1" { - err = apierrors.NewNotFound(schema.GroupResource{}, action.GetResource().String()) + err := apierrors.NewNotFound(schema.GroupResource{}, action.GetResource().String()) return true, nil, err } - return + return false, nil, nil }) client.PrependReactor("update", "certificatesigningrequests", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { approved := csrRes.DeepCopy() @@ -153,14 +153,14 @@ func TestApproveCSRHandler(t *testing.T) { } client := fake.NewClientset(csrRes) // Return NotFound for all v1 resources. - client.PrependReactor("*", "*", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + client.PrependReactor("*", "*", func(action ktest.Action) (handled bool, resource runtime.Object, reactionErr error) { if action.GetResource().Version == "v1" { - err = apierrors.NewNotFound(schema.GroupResource{}, action.GetResource().String()) + err := apierrors.NewNotFound(schema.GroupResource{}, action.GetResource().String()) return true, nil, err } - return + return false, nil, nil }) - client.PrependReactor("update", "certificatesigningrequests", func(action ktest.Action) (handled bool, ret runtime.Object, err error) { + client.PrependReactor("update", "certificatesigningrequests", func(action ktest.Action) (handled bool, resource runtime.Object, reactionErr error) { c := csrRes.DeepCopy() c.Status.Conditions = []certv1beta1.CertificateSigningRequestCondition{ { diff --git a/internal/actions/csr/integration_test.go b/internal/actions/csr/integration_test.go index 79fbdef1..afd04f02 100644 --- a/internal/actions/csr/integration_test.go +++ b/internal/actions/csr/integration_test.go @@ -104,7 +104,7 @@ func testIntegration(t *testing.T, csrVersion schema.GroupVersion)
{ username: "system:serviceaccount:castai-agent:castai-cluster-controller", }, { - description: "[client-kubelet] with username prefix sytem:node", + description: "[client-kubelet] with username prefix system:node", nodeName: "csr-cast-pool-6", signer: certv1.KubeAPIServerClientKubeletSignerName, usages: []string{string(certv1.UsageClientAuth)}, @@ -571,7 +571,7 @@ func setupManagerAndClientset(t *testing.T, csrVersion schema.GroupVersion) *fak err = apierrors.NewNotFound(schema.GroupResource{}, action.GetResource().String()) return true, nil, err } - return + return false, nil, nil }) logger := logrus.New() logger.SetOutput(io.Discard) diff --git a/internal/actions/csr/svc.go b/internal/actions/csr/svc.go index c679a73c..7f504c49 100644 --- a/internal/actions/csr/svc.go +++ b/internal/actions/csr/svc.go @@ -73,7 +73,7 @@ func (m *ApprovalManager) Start(ctx context.Context) error { c := make(chan *wrapper.CSR, 1) handlerFuncs := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { csr, err := wrapper.NewCSR(m.clientset, obj) if err != nil { m.log.WithError(err).Warn("creating csr wrapper") diff --git a/internal/actions/csr/svc_test.go b/internal/actions/csr/svc_test.go index f7f1c7ea..fc35b091 100644 --- a/internal/actions/csr/svc_test.go +++ b/internal/actions/csr/svc_test.go @@ -228,7 +228,7 @@ func TestApproveCSRExponentialBackoff(t *testing.T) { r := require.New(t) b := newApproveCSRExponentialBackoff() var sum time.Duration - for i := 0; i < 10; i++ { + for range 10 { tmp := b.Step() sum += tmp } diff --git a/internal/actions/csr/wrapper/csr.go b/internal/actions/csr/wrapper/csr.go index a7aff8a1..49d3e436 100644 --- a/internal/actions/csr/wrapper/csr.go +++ b/internal/actions/csr/wrapper/csr.go @@ -34,7 +34,7 @@ type CSR struct { } // NewCSR validates and creates new certificateRequestFacade. 
-func NewCSR(clientset kubernetes.Interface, csrObj interface{}) (*CSR, error) { +func NewCSR(clientset kubernetes.Interface, csrObj any) (*CSR, error) { var ( v1 *certv1.CertificateSigningRequest v1b1 *certv1beta1.CertificateSigningRequest diff --git a/internal/actions/csr/wrapper/csr_test.go b/internal/actions/csr/wrapper/csr_test.go index e42b461f..70336df5 100644 --- a/internal/actions/csr/wrapper/csr_test.go +++ b/internal/actions/csr/wrapper/csr_test.go @@ -27,7 +27,7 @@ func TestNewCSR(t *testing.T) { t.Parallel() for _, testcase := range []struct { name string - csrObj interface{} + csrObj any notOK bool }{ { @@ -650,7 +650,7 @@ func withConditionsV1Beta1(t *testing.T, clientset kubernetes.Interface, conditi func v1WithCreationTimestamp(t *testing.T, clientset kubernetes.Interface, creationTime time.Time) *wrapper.CSR { t.Helper() result, err := wrapper.NewCSR(clientset, modifyValidV1(t, func(v1 *certv1.CertificateSigningRequest) *certv1.CertificateSigningRequest { - v1.ObjectMeta.CreationTimestamp = metav1.NewTime(creationTime) + v1.CreationTimestamp = metav1.NewTime(creationTime) return v1 })) require.NoError(t, err, "failed to create CSR") @@ -660,7 +660,7 @@ func v1WithCreationTimestamp(t *testing.T, clientset kubernetes.Interface, creat func v1beta1WithCreationTimestamp(t *testing.T, clientset kubernetes.Interface, creationTime time.Time) *wrapper.CSR { t.Helper() result, err := wrapper.NewCSR(clientset, modifyValidV1Beta1(t, func(v1beta1 *certv1beta1.CertificateSigningRequest) *certv1beta1.CertificateSigningRequest { - v1beta1.ObjectMeta.CreationTimestamp = metav1.NewTime(creationTime) + v1beta1.CreationTimestamp = metav1.NewTime(creationTime) return v1beta1 })) require.NoError(t, err, "failed to create CSR") diff --git a/internal/actions/delete_handler.go b/internal/actions/delete_handler.go index 4a45e712..52ae8f2a 100644 --- a/internal/actions/delete_handler.go +++ b/internal/actions/delete_handler.go @@ -36,7 +36,7 @@ func (h *DeleteHandler) 
Handle(ctx context.Context, action *castai.ClusterAction log := h.log.WithFields(logrus.Fields{ "id": action.ID, "action": action.GetType(), - "gvr": req.ID.GroupVersionResource.String(), + "gvr": req.ID.String(), "name": req.ID.Name, }) diff --git a/internal/actions/delete_handler_test.go b/internal/actions/delete_handler_test.go index c5ba067e..e0595595 100644 --- a/internal/actions/delete_handler_test.go +++ b/internal/actions/delete_handler_test.go @@ -103,7 +103,6 @@ func Test_newDeleteHandler(t *testing.T) { } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { r := require.New(t) log := logrus.New() diff --git a/internal/actions/delete_node_handler_test.go b/internal/actions/delete_node_handler_test.go index 07611873..25769703 100644 --- a/internal/actions/delete_node_handler_test.go +++ b/internal/actions/delete_node_handler_test.go @@ -128,7 +128,6 @@ func TestDeleteNodeHandler_Handle(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var clientSet kubernetes.Interface diff --git a/internal/actions/drain_node_handler.go b/internal/actions/drain_node_handler.go index 4ac119b1..422d3e90 100644 --- a/internal/actions/drain_node_handler.go +++ b/internal/actions/drain_node_handler.go @@ -334,7 +334,7 @@ func (h *DrainNodeHandler) listNodePodsToEvict(ctx context.Context, log logrus.F // Evict CAST PODs as last ones. for _, p := range pods.Items { // Skip pods that have been recently removed. - if !p.ObjectMeta.DeletionTimestamp.IsZero() && + if !p.DeletionTimestamp.IsZero() && int(time.Since(p.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > h.cfg.skipDeletedTimeoutSeconds { continue } @@ -365,7 +365,7 @@ func (h *DrainNodeHandler) listNodePodsToEvict(ctx context.Context, log logrus.F // This is useful when you don't expect some pods on the node to terminate (e.g. because eviction failed for them) so there is no reason to wait until timeout. 
// The wait can potentially run forever if pods are scheduled on the node and are not evicted/deleted by anything. Use a timeout to avoid infinite wait. func (h *DrainNodeHandler) waitNodePodsTerminated(ctx context.Context, log logrus.FieldLogger, node *v1.Node, podsToIgnore []*v1.Pod) error { - // Check if context is cancelled before starting any work. + // Check if context is canceled before starting any work. select { case <-ctx.Done(): return ctx.Err() diff --git a/internal/actions/evict_pod_handler.go b/internal/actions/evict_pod_handler.go index ca9133f5..09c7f4d0 100644 --- a/internal/actions/evict_pod_handler.go +++ b/internal/actions/evict_pod_handler.go @@ -45,7 +45,7 @@ func (h *EvictPodHandler) Handle(ctx context.Context, action *castai.ClusterActi } log := h.log.WithFields(logrus.Fields{ ActionIDLogField: action.ID, - "action": reflect.TypeOf(req).String(), + "action": reflect.TypeFor[*castai.ActionEvictPod]().String(), "namespace": req.Namespace, "pod": req.PodName, }) diff --git a/internal/actions/kubernetes_helpers.go b/internal/actions/kubernetes_helpers.go index b78163e4..c1c088f9 100644 --- a/internal/actions/kubernetes_helpers.go +++ b/internal/actions/kubernetes_helpers.go @@ -168,7 +168,7 @@ func executeBatchPodActions( pods []v1.Pod, action func(context.Context, v1.Pod) error, actionName string, -) (successfulPods []*v1.Pod, failedPods []podActionFailure) { +) ([]*v1.Pod, []podActionFailure) { if actionName == "" { actionName = "unspecified" } @@ -220,15 +220,17 @@ func executeBatchPodActions( close(failedPodsChan) close(successfulPodsChan) + var successfulPods []*v1.Pod for pod := range successfulPodsChan { successfulPods = append(successfulPods, pod) } + var failedPods []podActionFailure for failure := range failedPodsChan { failedPods = append(failedPods, failure) } - return + return successfulPods, failedPods } func defaultBackoff() wait.Backoff { diff --git a/internal/actions/kubernetes_helpers_test.go 
b/internal/actions/kubernetes_helpers_test.go index 35c8ca94..31200fa7 100644 --- a/internal/actions/kubernetes_helpers_test.go +++ b/internal/actions/kubernetes_helpers_test.go @@ -277,7 +277,6 @@ func Test_isNodeIDProviderIDValid(t *testing.T) { }, } for _, tt := range tests { - tt := tt // capture range variable t.Run(tt.name, func(t *testing.T) { t.Parallel() got := isNodeIDProviderIDValid(tt.args.node, tt.args.nodeID, tt.args.providerID, logrus.New()) @@ -548,7 +547,6 @@ func Test_getNodeByIDs(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) diff --git a/internal/actions/patch_handler.go b/internal/actions/patch_handler.go index 69bef7e8..2eaec289 100644 --- a/internal/actions/patch_handler.go +++ b/internal/actions/patch_handler.go @@ -43,7 +43,7 @@ func (h *PatchHandler) Handle(ctx context.Context, action *castai.ClusterAction) log := h.log.WithFields(logrus.Fields{ ActionIDLogField: action.ID, "action": action.GetType(), - "gvr": req.ID.GroupVersionResource.String(), + "gvr": req.ID.String(), "name": req.ID.Name, }) if req.ID.Namespace != nil { diff --git a/internal/actions/patch_handler_test.go b/internal/actions/patch_handler_test.go index a0a8d1b9..f58545b7 100644 --- a/internal/actions/patch_handler_test.go +++ b/internal/actions/patch_handler_test.go @@ -69,7 +69,6 @@ func TestPatchHandler(t *testing.T) { } for name, test := range tests { - test := test t.Run(name, func(t *testing.T) { t.Parallel() r := require.New(t) @@ -92,7 +91,7 @@ func TestPatchHandler(t *testing.T) { // The fake client does not work properly with patching. And it does not aim to replicate the api-server logic. // There are ways to work around it, but the test is testing fake code then. 
// For context, here's the PR that attempted to circumvent the issue: https://github.com/kubernetes/kubernetes/pull/78630 - actions := client.Fake.Actions() + actions := client.Actions() r.Len(actions, 1) action, ok := actions[0].(client_testing.PatchAction) r.True(ok, "action is not a patch action") diff --git a/internal/actions/patch_node_handler.go b/internal/actions/patch_node_handler.go index 8cf1ce6d..042db9e1 100644 --- a/internal/actions/patch_node_handler.go +++ b/internal/actions/patch_node_handler.go @@ -88,7 +88,7 @@ func (h *PatchNodeHandler) Handle(ctx context.Context, action *castai.ClusterAct if req.Unschedulable == nil && len(req.Labels) == 0 && len(req.Taints) == 0 && len(req.Annotations) == 0 { log.Info("no patch for node spec or labels") } else { - log.WithFields(map[string]interface{}{ + log.WithFields(map[string]any{ "labels": req.Labels, "taints": req.Taints, "annotations": req.Annotations, @@ -108,8 +108,8 @@ func (h *PatchNodeHandler) Handle(ctx context.Context, action *castai.ClusterAct if len(req.Capacity) > 0 { log.WithField("capacity", req.Capacity).Infof("patching node status") - patch, err := json.Marshal(map[string]interface{}{ - "status": map[string]interface{}{ + patch, err := json.Marshal(map[string]any{ + "status": map[string]any{ "capacity": req.Capacity, }, }) diff --git a/internal/actions/patch_node_handler_test.go b/internal/actions/patch_node_handler_test.go index 29d30941..d6910f9f 100644 --- a/internal/actions/patch_node_handler_test.go +++ b/internal/actions/patch_node_handler_test.go @@ -245,7 +245,6 @@ func TestPatchNodeHandler_Handle(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clientSet := fake.NewClientset(tt.fields.tuneFakeObjects...) 
diff --git a/internal/actions/types.go b/internal/actions/types.go index 1d60069f..50e12acf 100644 --- a/internal/actions/types.go +++ b/internal/actions/types.go @@ -25,7 +25,7 @@ var ( errNodeWatcherClosed = fmt.Errorf("node watcher closed, no more events will be received") ) -func newUnexpectedTypeErr(value, expectedType interface{}) error { +func newUnexpectedTypeErr(value, expectedType any) error { return fmt.Errorf("unexpected type %T, expected %T %w", value, expectedType, errAction) } diff --git a/internal/castai/types.go b/internal/castai/types.go index c9bc612d..340619d2 100644 --- a/internal/castai/types.go +++ b/internal/castai/types.go @@ -48,7 +48,7 @@ type ClusterAction struct { Error *string `json:"error,omitempty"` } -func (c *ClusterAction) Data() interface{} { +func (c *ClusterAction) Data() any { if c.ActionDeleteNode != nil { return c.ActionDeleteNode } @@ -140,7 +140,7 @@ type ActionPatch struct { type ActionCreate struct { GroupVersionResource `json:",inline"` - Object map[string]interface{} `json:"object,omitempty"` + Object map[string]any `json:"object,omitempty"` } type ActionDelete struct { diff --git a/internal/config/config_test.go b/internal/config/config_test.go index c5c8ae0e..839c6698 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,7 +1,6 @@ package config import ( - "os" "testing" "time" @@ -12,16 +11,16 @@ import ( func TestConfig(t *testing.T) { clusterId := uuid.New().String() - require.NoError(t, os.Setenv("API_KEY", "abc")) - require.NoError(t, os.Setenv("API_URL", "api.cast.ai")) - require.NoError(t, os.Setenv("KUBECONFIG", "~/.kube/config")) - require.NoError(t, os.Setenv("CLUSTER_ID", clusterId)) - require.NoError(t, os.Setenv("LEADER_ELECTION_ENABLED", "true")) - require.NoError(t, os.Setenv("LEADER_ELECTION_NAMESPACE", "castai-agent")) - require.NoError(t, os.Setenv("LEADER_ELECTION_LOCK_NAME", "castai-cluster-controller")) - require.NoError(t, 
os.Setenv("LEADER_ELECTION_LEASE_DURATION", "25s")) - require.NoError(t, os.Setenv("LEADER_ELECTION_LEASE_RENEW_DEADLINE", "20s")) - require.NoError(t, os.Setenv("METRICS_PORT", "16000")) + t.Setenv("API_KEY", "abc") + t.Setenv("API_URL", "api.cast.ai") + t.Setenv("KUBECONFIG", "~/.kube/config") + t.Setenv("CLUSTER_ID", clusterId) + t.Setenv("LEADER_ELECTION_ENABLED", "true") + t.Setenv("LEADER_ELECTION_NAMESPACE", "castai-agent") + t.Setenv("LEADER_ELECTION_LOCK_NAME", "castai-cluster-controller") + t.Setenv("LEADER_ELECTION_LEASE_DURATION", "25s") + t.Setenv("LEADER_ELECTION_LEASE_RENEW_DEADLINE", "20s") + t.Setenv("METRICS_PORT", "16000") cfg := Get() diff --git a/internal/controller/controller.go b/internal/controller/controller.go index d1d7bea9..bd375e6f 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -189,19 +189,18 @@ func (s *Controller) startProcessing(actionID string) bool { return true } -func (s *Controller) handleAction(ctx context.Context, action *castai.ClusterAction) (err error) { +func (s *Controller) handleAction(ctx context.Context, action *castai.ClusterAction) (retErr error) { // Check if the action can be used at all before continuing. // We still want to ACK the action in this case, otherwise it will keep being resent until it expires. 
if !action.IsValid() || (action.GetType() == castai.UnknownActionType) { - err = fmt.Errorf("invalid action, check action data or if cluster controller version supports this action type") - return + return fmt.Errorf("invalid action, check action data or if cluster controller version supports this action type") } actionType := reflect.TypeOf(action.Data()) defer func() { if rerr := recover(); rerr != nil { - err = fmt.Errorf("panic: handling action %s: %s: %s", actionType, rerr, string(debug.Stack())) + retErr = fmt.Errorf("panic: handling action %s: %s: %s", actionType, rerr, string(debug.Stack())) } }() diff --git a/internal/controller/controller_test.go b/internal/controller/controller_test.go index fd6e8f0f..97629ad4 100644 --- a/internal/controller/controller_test.go +++ b/internal/controller/controller_test.go @@ -232,7 +232,6 @@ func TestController_Run(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() m := gomock.NewController(t) @@ -247,9 +246,9 @@ func TestController_Run(t *testing.T) { tt.fields.tuneMockHandler(handler) } testActionHandlers := map[reflect.Type]actions.ActionHandler{ - reflect.TypeOf(&castai.ActionDeleteNode{}): handler, - reflect.TypeOf(&castai.ActionDrainNode{}): handler, - reflect.TypeOf(&castai.ActionPatchNode{}): handler, + reflect.TypeFor[*castai.ActionDeleteNode](): handler, + reflect.TypeFor[*castai.ActionDrainNode](): handler, + reflect.TypeFor[*castai.ActionPatchNode](): handler, } s := NewService( diff --git a/internal/controller/logexporter/logexporter.go b/internal/controller/logexporter/logexporter.go index 7d435924..3bc6a273 100644 --- a/internal/controller/logexporter/logexporter.go +++ b/internal/controller/logexporter/logexporter.go @@ -3,6 +3,7 @@ package logexporter import ( "context" "fmt" + "maps" "path" "runtime" "sync" @@ -82,9 +83,7 @@ func (e *LogExporter) Fire(entry *logrus.Entry) error { Message: entry.Message, } castLogEntry.Fields = make(logrus.Fields, 
len(entry.Data)) - for k, v := range entry.Data { - castLogEntry.Fields[k] = v - } + maps.Copy(castLogEntry.Fields, entry.Data) go func(entry *castai.LogEntry) { defer e.wg.Done() diff --git a/internal/controller/logexporter/logexporter_test.go b/internal/controller/logexporter/logexporter_test.go index 7390b96c..45e592e8 100644 --- a/internal/controller/logexporter/logexporter_test.go +++ b/internal/controller/logexporter/logexporter_test.go @@ -53,7 +53,6 @@ func TestSetupLogExporter(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() m := gomock.NewController(t) diff --git a/internal/helm/chart_loader.go b/internal/helm/chart_loader.go index fc20154a..405b13e5 100644 --- a/internal/helm/chart_loader.go +++ b/internal/helm/chart_loader.go @@ -92,7 +92,7 @@ func (cl *remoteChartLoader) fetchArchive(ctx context.Context, archiveURL string httpClient := &http.Client{ Timeout: 30 * time.Second, } - archiveReq, err := http.NewRequestWithContext(ctx, "GET", archiveURL, nil) + archiveReq, err := http.NewRequestWithContext(ctx, http.MethodGet, archiveURL, nil) if err != nil { return nil, err } diff --git a/internal/helm/client.go b/internal/helm/client.go index 9bbeaefe..3fa29158 100644 --- a/internal/helm/client.go +++ b/internal/helm/client.go @@ -108,7 +108,7 @@ func (c *client) Install(ctx context.Context, opts InstallOptions) (*release.Rel install.Wait = true // Wait unit all applied resources are running. // Prepare user value overrides. - values := map[string]interface{}{} + values := map[string]any{} if err := mergeValuesOverrides(values, opts.ValuesOverrides); err != nil { return nil, err } @@ -160,7 +160,7 @@ func (c *client) Upgrade(ctx context.Context, opts UpgradeOptions) (*release.Rel name := opts.Release.Name // Prepare user value overrides. 
- values := map[string]interface{}{} + values := map[string]any{} if len(opts.Release.Config) > 0 { values = opts.Release.Config } @@ -228,13 +228,13 @@ func (c *configurationGetter) Get(namespace string) (*action.Configuration, erro return cfg, nil } -func (c *configurationGetter) debugFuncf(format string, v ...interface{}) { +func (c *configurationGetter) debugFuncf(format string, v ...any) { if c.debug { c.log.Debug(fmt.Sprintf(format, v...)) } } -func mergeValuesOverrides(values map[string]interface{}, overrides map[string]string) error { +func mergeValuesOverrides(values map[string]any, overrides map[string]string) error { for k, v := range overrides { value := fmt.Sprintf("%s=%v", k, v) if err := strvals.ParseInto(value, values); err != nil { diff --git a/internal/helm/client_test.go b/internal/helm/client_test.go index e78bef18..193dac19 100644 --- a/internal/helm/client_test.go +++ b/internal/helm/client_test.go @@ -41,8 +41,8 @@ func TestClientInstall(t *testing.T) { r.NoError(err) r.Equal("nginx-ingress", rel.Name) r.Equal("test", rel.Namespace) - r.Equal(int64(2), rel.Config["controller"].(map[string]interface{})["replicaCount"]) - r.Equal("NodePort", rel.Config["controller"].(map[string]interface{})["service"].(map[string]interface{})["type"]) + r.Equal(int64(2), rel.Config["controller"].(map[string]any)["replicaCount"]) + r.Equal("NodePort", rel.Config["controller"].(map[string]any)["service"].(map[string]any)["type"]) r.Equal("noop", rel.Config["random"]) } @@ -71,7 +71,7 @@ func TestClientUpdate(t *testing.T) { r.NoError(err) r.NotNil(rel) r.Equal("nginx-ingress", rel.Name) - r.Equal(int64(100), rel.Config["controller"].(map[string]interface{})["replicaCount"]) + r.Equal(int64(100), rel.Config["controller"].(map[string]any)["replicaCount"]) r.Equal("noop", rel.Config["random"]) } @@ -101,7 +101,7 @@ func TestClientUpdateResetThenReuseValue(t *testing.T) { r.NoError(err) r.NotNil(rel) r.Equal("nginx-ingress", rel.Name) - r.Equal(int64(100), 
rel.Config["controller"].(map[string]interface{})["replicaCount"]) + r.Equal(int64(100), rel.Config["controller"].(map[string]any)["replicaCount"]) r.Equal("noop", rel.Config["random"]) } @@ -135,7 +135,7 @@ func (c *testConfigurationGetter) Get(_ string) (*action.Configuration, error) { Releases: storage.Init(driver.NewMemory()), KubeClient: &fake.PrintingKubeClient{Out: io.Discard}, Capabilities: chartutil.DefaultCapabilities, - Log: func(format string, v ...interface{}) { + Log: func(format string, v ...any) { c.t.Helper() c.t.Logf(format, v...) }, @@ -168,10 +168,10 @@ func buildNginxIngressChart() *chart.Chart { Templates: []*chart.File{ {Name: "templates/hello", Data: []byte("hello: world")}, }, - Values: map[string]interface{}{ - "controller": map[string]interface{}{ + Values: map[string]any{ + "controller": map[string]any{ "replicaCount": 1, - "service": map[string]interface{}{ + "service": map[string]any{ "type": "LoadBalancer", }, }, @@ -191,7 +191,7 @@ func buildNginxIngressRelease(status release.Status) *release.Release { Description: "Named Release Stub", }, Chart: buildNginxIngressChart(), - Config: map[string]interface{}{"name": "value"}, + Config: map[string]any{"name": "value"}, Version: 1, } } diff --git a/internal/helm/hook/hook.go b/internal/helm/hook/hook.go index 4b6168f7..0ff2f9e8 100644 --- a/internal/helm/hook/hook.go +++ b/internal/helm/hook/hook.go @@ -96,7 +96,7 @@ func (l *LabelIgnoreHook) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, e if _, ok := labelIgnoreResources[key]; ok { oldLabels := getChartLabels(oldManifests, u.GetName(), u.GetKind(), u.GetNamespace()) if oldLabels == nil { - return nil, fmt.Errorf("updating a previously non-existant chart %s", gvk) + return nil, fmt.Errorf("updating a previously non-existent chart %s", gvk) } labelCopy := u.GetLabels() // Reset version labels to previous release. 
diff --git a/internal/helm/hook/hook_test.go b/internal/helm/hook/hook_test.go index db1ba8e5..a66e2582 100644 --- a/internal/helm/hook/hook_test.go +++ b/internal/helm/hook/hook_test.go @@ -27,7 +27,7 @@ type k8sObjectDetails struct { } func renderManifestTemplate(apiVersion, kind, name, appVersion, chartVersion string) (string, error) { - vars := map[string]interface{}{ + vars := map[string]any{ "ApiVersion": apiVersion, "Kind": kind, "Name": name, diff --git a/internal/monitor/metatada_test.go b/internal/monitor/metatada_test.go index 3ecbd87d..46234ced 100644 --- a/internal/monitor/metatada_test.go +++ b/internal/monitor/metatada_test.go @@ -34,7 +34,6 @@ func TestSaveMetadata(t *testing.T) { } for testName, tt := range tests { - tt := tt t.Run(testName, func(t *testing.T) { r := require.New(t) baseDir := t.TempDir() diff --git a/internal/waitext/extensions.go b/internal/waitext/extensions.go index 7a704442..7059e6a9 100644 --- a/internal/waitext/extensions.go +++ b/internal/waitext/extensions.go @@ -44,7 +44,7 @@ func NewConstantBackoff(interval time.Duration) wait.Backoff { // Retry executes an operation with retries following these semantics: // -// - The operation is executed at least once (even if context is cancelled) +// - The operation is executed at least once (even if context is canceled) // // - If operation returns nil error, assumption is that it succeeded // @@ -54,13 +54,13 @@ func NewConstantBackoff(interval time.Duration) wait.Backoff { // // - retries reaches 0 // -// - the context is cancelled +// - the context is canceled // // The end result is: // // - nil if operation was successful at least once // - last encountered error from operation if retries are exhausted -// - a multi-error if context is cancelled that contains - the ctx.Err(), context.Cause() and last encountered error from the operation +// - a multi-error if context is canceled that contains - the ctx.Err(), context.Cause() and last encountered error from the operation // // 
If retryNotify is passed, it is called when making retries. // Caveat: this function is similar to wait.ExponentialBackoff but has some important behavior differences like at-least-one execution and retryable errors. diff --git a/internal/waitext/extensions_test.go b/internal/waitext/extensions_test.go index 5d656a1a..1ee0ca35 100644 --- a/internal/waitext/extensions_test.go +++ b/internal/waitext/extensions_test.go @@ -16,7 +16,7 @@ func TestNewConstantBackoff(t *testing.T) { expectedSleepDuration := 10 * time.Second backoff := NewConstantBackoff(expectedSleepDuration) - for i := 0; i < 10; i++ { + for range 10 { r.Equal(expectedSleepDuration, backoff.Step()) } } @@ -162,12 +162,12 @@ func TestRetry(t *testing.T) { cancel(cancelCause) <-done - r.ErrorIs(overallReturnedErr, context.Canceled, "Expected context cancelled to be propagated") + r.ErrorIs(overallReturnedErr, context.Canceled, "Expected context canceled to be propagated") r.ErrorIs(overallReturnedErr, innerError, "Expected inner error by operation be propagated") r.ErrorIs(overallReturnedErr, cancelCause, "Expected cancel cause error to be propagated") }) - t.Run("Operation is called at least once, even if context is cancelled", func(t *testing.T) { + t.Run("Operation is called at least once, even if context is canceled", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() diff --git a/loadtest/castai.go b/loadtest/castai.go index 641df3d6..6e386cdb 100644 --- a/loadtest/castai.go +++ b/loadtest/castai.go @@ -38,7 +38,7 @@ func NewTestServer(logger *slog.Logger, cfg TestServerConfig) *CastAITestServer } // ExecuteActions pushes the list of actions to the queue for cluster controller to process. -// This method returns when all actions are acked or context is cancelled. +// This method returns when all actions are acked or context is canceled. 
func (c *CastAITestServer) ExecuteActions(ctx context.Context, actions []castai.ClusterAction) { // owner channel has 1:n relationship with the actions. It handles the ack ownerChannel := make(chan string, len(actions)) @@ -47,7 +47,7 @@ func (c *CastAITestServer) ExecuteActions(ctx context.Context, actions []castai. if action.ID == "" { action.ID = uuid.NewString() } - if action.CreatedAt == (time.Time{}) { + if action.CreatedAt.IsZero() { action.CreatedAt = time.Now() } c.addActionToStore(action.ID, action, ownerChannel) diff --git a/loadtest/scenarios/delete_node.go b/loadtest/scenarios/delete_node.go index 99bea2dc..034bc28d 100644 --- a/loadtest/scenarios/delete_node.go +++ b/loadtest/scenarios/delete_node.go @@ -64,7 +64,7 @@ func (s *deleteNodeScenario) Preparation(ctx context.Context, namespace string, s.log.Info(fmt.Sprintf("Creating deployment on node %s", nodeName)) deployment := Deployment(fmt.Sprintf("fake-deployment-%s-%d", node.Name, i)) - deployment.ObjectMeta.Namespace = namespace + deployment.Namespace = namespace //nolint:gosec // Not afraid of overflow here. deployment.Spec.Replicas = lo.ToPtr(int32(s.deploymentReplicas)) deployment.Spec.Template.Spec.NodeName = nodeName diff --git a/loadtest/scenarios/drain_node.go b/loadtest/scenarios/drain_node.go index a3dfe914..15d1d1e8 100644 --- a/loadtest/scenarios/drain_node.go +++ b/loadtest/scenarios/drain_node.go @@ -65,7 +65,7 @@ func (s *drainNodeScenario) Preparation(ctx context.Context, namespace string, c s.log.Info(fmt.Sprintf("Creating deployment on node %s", nodeName)) deployment := Deployment(fmt.Sprintf("fake-deployment-%s-%d", node.Name, i)) - deployment.ObjectMeta.Namespace = namespace + deployment.Namespace = namespace //nolint:gosec // Not afraid of overflow here. 
deployment.Spec.Replicas = lo.ToPtr(int32(s.deploymentReplicas)) deployment.Spec.Template.Spec.NodeName = nodeName diff --git a/loadtest/scenarios/evict_pod.go b/loadtest/scenarios/evict_pod.go index 948bff0f..51538e8f 100644 --- a/loadtest/scenarios/evict_pod.go +++ b/loadtest/scenarios/evict_pod.go @@ -55,7 +55,7 @@ func (e *evictPodScenario) Preparation(ctx context.Context, namespace string, cl } pod := Pod(fmt.Sprintf("evict-pod-%d", i)) - pod.ObjectMeta.Namespace = namespace + pod.Namespace = namespace pod.Spec.NodeName = nodeName e.log.Info(fmt.Sprintf("Creating pod %s", pod.Name)) diff --git a/loadtest/scenarios/scenario.go b/loadtest/scenarios/scenario.go index 92f3863f..0900d5a1 100644 --- a/loadtest/scenarios/scenario.go +++ b/loadtest/scenarios/scenario.go @@ -66,7 +66,7 @@ func RunScenario( return fmt.Errorf("failed to create namespace %v: %w", namespaceForTest, err) } defer func() { - // Cleanup uses different context so it runs even when the overall one is already cancelled + // Cleanup uses different context so it runs even when the overall one is already canceled ctxForCleanup, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() @@ -88,7 +88,7 @@ func RunScenario( logger.Info("Running preparation function") // We defer the cleanup before running preparation or run because each can "fail" in the middle and leave hanging resources. 
defer func() { - // Cleanup uses different context so it runs even when the overall one is already cancelled + // Cleanup uses different context so it runs even when the overall one is already canceled ctxForCleanup, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() diff --git a/loadtest/scenarios/stuck_drain.go b/loadtest/scenarios/stuck_drain.go index ea53f80e..227d35fa 100644 --- a/loadtest/scenarios/stuck_drain.go +++ b/loadtest/scenarios/stuck_drain.go @@ -71,11 +71,11 @@ func (s *stuckDrainScenario) Preparation(ctx context.Context, namespace string, s.log.Info(fmt.Sprintf("Creating deployment on node %s", nodeName)) deployment, pdb := DeploymentWithStuckPDB(fmt.Sprintf("fake-deployment-%s-%d", node.Name, i)) - deployment.ObjectMeta.Namespace = namespace + deployment.Namespace = namespace //nolint:gosec // Not afraid of overflow here. deployment.Spec.Replicas = lo.ToPtr(int32(s.deploymentReplicas)) deployment.Spec.Template.Spec.NodeName = nodeName - pdb.ObjectMeta.Namespace = namespace + pdb.Namespace = namespace _, err = clientset.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}) if err != nil {