diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml index 2845fcf82c3c..1df41b038c6c 100644 --- a/.github/workflows/pr-golangci-lint.yaml +++ b/.github/workflows/pr-golangci-lint.yaml @@ -28,10 +28,9 @@ jobs: with: go-version: ${{ steps.vars.outputs.go_version }} - name: golangci-lint - uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # tag=v6.5.2 + uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # tag=v7.0.0 with: - version: v1.63.4 - args: --out-format=colored-line-number + version: v2.0.2 working-directory: ${{matrix.working-directory}} - name: Lint API - run: GOLANGCI_LINT_EXTRA_ARGS=--out-format=colored-line-number make lint-api + run: make lint-api diff --git a/.golangci-kal.yml b/.golangci-kal.yml index 241134f32034..c1f45cdfb890 100644 --- a/.golangci-kal.yml +++ b/.golangci-kal.yml @@ -1,114 +1,113 @@ +version: "2" run: - timeout: 10m go: "1.23" allow-parallel-runners: true - linters: - disable-all: true + default: none enable: - kubeapilinter # linter for Kube API conventions - -linters-settings: - custom: - kubeapilinter: - type: "module" - description: kube-api-linter and lints Kube like APIs based on API conventions and best practices. - settings: - linters: - enable: - - "commentstart" # Ensure comments start with the serialized version of the field name. - - "conditions" # Ensure conditions have the correct json tags and markers. - - "integers" # Ensure only int32 and int64 are used for integers. - - "jsontags" # Ensure every field has a json tag. - - "maxlength" # Ensure all strings and arrays have maximum lengths/maximum items. - - "nobools" # Bools do not evolve over time, should use enums instead. - - "nofloats" # Ensure floats are not used. - - "optionalorrequired" # Every field should be marked as `+optional` or `+required`. - - "requiredfields" # Required fields should not be pointers, and should not have `omitempty`. - - "statussubresource" # All root objects that have a `status` field should have a status subresource. + settings: + custom: + kubeapilinter: + type: module + description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enable: + - "commentstart" # Ensure comments start with the serialized version of the field name. + - "conditions" # Ensure conditions have the correct json tags and markers. + - "integers" # Ensure only int32 and int64 are used for integers. + - "jsontags" # Ensure every field has a json tag. + - "maxlength" # Ensure all strings and arrays have maximum lengths/maximum items. + - "nobools" # Bools do not evolve over time, should use enums instead. + - "nofloats" # Ensure floats are not used. + - "optionalorrequired" # Every field should be marked as `+optional` or `+required`. + - "requiredfields" # Required fields should not be pointers, and should not have `omitempty`. + - "statussubresource" # All root objects that have a `status` field should have a status subresource. + + # Per discussion in July 2024, we are keeping phase fields for now. + # See https://github.com/kubernetes-sigs/cluster-api/pull/10897#discussion_r1685929508 + # and https://github.com/kubernetes-sigs/cluster-api/pull/10897#discussion_r1685919394. + # - "nophase" # Phase fields are discouraged by the Kube API conventions, use conditions instead. - # Per discussion in July 2024, we are keeping phase fields for now. 
- # See https://github.com/kubernetes-sigs/cluster-api/pull/10897#discussion_r1685929508 - # and https://github.com/kubernetes-sigs/cluster-api/pull/10897#discussion_r1685919394. - # - "nophase" # Phase fields are discouraged by the Kube API conventions, use conditions instead. - - # Linters below this line are disabled, pending conversation on how and when to enable them. - disable: - - "*" # We will manually enable new linters after understanding the impact. Disable all by default. - lintersConfig: - conditions: - isFirstField: Warn # Require conditions to be the first field in the status struct. - usePatchStrategy: Forbid # Conditions should not use the patch strategy on CRDs. - useProtobuf: Forbid # We don't use protobuf, so protobuf tags are not required. - # jsonTags: - # jsonTagRegex: "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" # The default regex is appropriate for our use case. - # optionalOrRequired: - # preferredOptionalMarker: optional | kubebuilder:validation:Optional # The preferred optional marker to use, fixes will suggest to use this marker. Defaults to `optional`. - # preferredRequiredMarker: required | kubebuilder:validation:Required # The preferred required marker to use, fixes will suggest to use this marker. Defaults to `required`. - # requiredFields: - # pointerPolicy: Warn | SuggestFix # Defaults to `SuggestFix`. We want our required fields to not be pointers. + # Linters below this line are disabled, pending conversation on how and when to enable them. + disable: + - "*" # We will manually enable new linters after understanding the impact. Disable all by default. + lintersConfig: + conditions: + isFirstField: Warn # Require conditions to be the first field in the status struct. + usePatchStrategy: Forbid # Require conditions to be the first field in the status struct. + useProtobuf: Forbid # We don't use protobuf, so protobuf tags are not required. + # jsonTags: + # jsonTagRegex: "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" # The default regex is appropriate for our use case. + # optionalOrRequired: + # preferredOptionalMarker: optional | kubebuilder:validation:Optional # The preferred optional marker to use, fixes will suggest to use this marker. Defaults to `optional`. + # preferredRequiredMarker: required | kubebuilder:validation:Required # The preferred required marker to use, fixes will suggest to use this marker. Defaults to `required`. + # requiredFields: + # pointerPolicy: Warn | SuggestFix # Defaults to `SuggestFix`. We want our required fields to not be pointers. + exclusions: + generated: lax + rules: + # KAL should only run on API folders. + - path-except: "api//*" + linters: + - kubeapilinter + - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1beta1/*|api/v1alpha1/*|api/addons/v1beta1/*" + text: "Conditions field must be a slice of metav1.Condition" + linters: + - kubeapilinter + - path: "api/v1beta2/*|api/v1beta1/*" + text: "type ClusterIPFamily should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements" + linters: + - kubeapilinter + - path: "exp/ipam/api/v1beta2/*|exp/ipam/api/v1alpha1/*|exp/ipam/api/v1beta1/*" + text: "field Prefix should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements" + linters: + - kubeapilinter + # clusterctl and Runtime Hooks can be fixed once we bump their apiVersion. 
+ - path: "cmd/clusterctl/api/v1alpha3|exp/runtime/hooks/api/v1alpha1" + text: "maxlength" + linters: + - kubeapilinter + # controller-gen does not allow to add MaxItems to Schemaless fields + - path: "api/v1beta2/*|api/v1beta1/*" + text: "maxlength: field (AllOf|OneOf|AnyOf) must have a maximum items, add kubebuilder:validation:MaxItems marker" + linters: + - kubeapilinter + # It does not make sense to add a maxItems marker on the *List structs as they are not used to generate CRD YAMLs. + # This exclude will be removed once https://github.com/JoelSpeed/kubeapilinter/issues/38 is resolved. + - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1beta1/*|api/v1alpha1/*|api/addons/v1beta1/*" + text: "maxlength: field Items must have a maximum items, add kubebuilder:validation:MaxItems marker" + linters: + - kubeapilinter + - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1alpha1/*|api/v1beta1/*|api/addons/v1beta1/*" + text: "nobools" + linters: + - kubeapilinter + # We follow the current CustomResourceDefinition field's json tag pattern. + - path: "api/v1beta2/*|api/v1beta1/*" + text: "field (XPreserveUnknownFields|XPreserveUnknownFields|XValidations|XMetadata|XIntOrString) json tag does not match pattern" + linters: + - kubeapilinter + # The following rules are disabled until we migrate to the new API. + - path: "bootstrap/kubeadm/api/v1beta2/kubeadm_types.go|bootstrap/kubeadm/api/v1beta1/kubeadm_types.go" + text: "field Token is marked as required, should not be a pointer" + linters: + - kubeapilinter + - path: "api/v1beta2/clusterclass_types.go|api/v1beta1/clusterclass_types.go" + text: "field Ref is marked as required, should not be a pointer" + linters: + - kubeapilinter + - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1alpha1/*|api/v1beta1/*|api/v1alpha3/*|api/addons/v1beta1/*" + text: "field Items must be marked as optional or required" + linters: + - kubeapilinter + paths: + - zz_generated.*\.go$ + - vendored_openapi\.go$ + # We don't want to invest time to fix new linter findings in old API types. + - internal/apis/.* + - ".*_test.go" # Exclude test files. issues: - exclude-files: - - "zz_generated.*\\.go$" - - "vendored_openapi\\.go$" - # We don't want to invest time to fix new linter findings in old API types. - - "internal/apis/.*" - - ".*_test.go" # Exclude test files. - max-same-issues: 0 max-issues-per-linter: 0 - exclude-rules: - # KAL should only run on API folders. - - path-except: "api//*" - linters: - - kubeapilinter - - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1beta1/*|api/v1alpha1/*|api/addons/v1beta1/*" - text: "Conditions field must be a slice of metav1.Condition" - linters: - - kubeapilinter - - path: "api/v1beta2/*|api/v1beta1/*" - text: "type ClusterIPFamily should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements" - linters: - - kubeapilinter - - path: "exp/ipam/api/v1beta2/*|exp/ipam/api/v1alpha1/*|exp/ipam/api/v1beta1/*" - text: "field Prefix should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements" - linters: - - kubeapilinter - # clusterctl and Runtime Hooks can be fixed once we bump their apiVersion. 
- - path: "cmd/clusterctl/api/v1alpha3|exp/runtime/hooks/api/v1alpha1" - text: "maxlength" - linters: - - kubeapilinter - # controller-gen does not allow to add MaxItems to Schemaless fields - - path: "api/v1beta2/*|api/v1beta1/*" - text: "maxlength: field (AllOf|OneOf|AnyOf) must have a maximum items, add kubebuilder:validation:MaxItems marker" - linters: - - kubeapilinter - # It does not make sense to add a maxItems marker on the *List structs as they are not used to generate CRD YAMLs. - # This exclude will be removed once https://github.com/JoelSpeed/kubeapilinter/issues/38 is resolved. - - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1beta1/*|api/v1alpha1/*|api/addons/v1beta1/*" - text: "maxlength: field Items must have a maximum items, add kubebuilder:validation:MaxItems marker" - linters: - - kubeapilinter - - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1alpha1/*|api/v1beta1/*|api/addons/v1beta1/*" - text: "nobools" - linters: - - kubeapilinter - # We follow the current CustomResourceDefinition field's json tag pattern. - - path: "api/v1beta2/*|api/v1beta1/*" - text: "field (XPreserveUnknownFields|XPreserveUnknownFields|XValidations|XMetadata|XIntOrString) json tag does not match pattern" - linters: - - kubeapilinter - # The following rules are disabled until we migrate to the new API. - - path: "bootstrap/kubeadm/api/v1beta2/kubeadm_types.go|bootstrap/kubeadm/api/v1beta1/kubeadm_types.go" - text: "field Token is marked as required, should not be a pointer" - linters: - - kubeapilinter - - path: "api/v1beta2/clusterclass_types.go|api/v1beta1/clusterclass_types.go" - text: "field Ref is marked as required, should not be a pointer" - linters: - - kubeapilinter - - path: "api/v1beta2/*|api/addons/v1beta2/*|api/v1alpha1/*|api/v1beta1/*|api/v1alpha3/*|api/addons/v1beta1/*" - text: "field Items must be marked as optional or required" - linters: - - kubeapilinter diff --git a/.golangci.yml b/.golangci.yml index b9777ffc3540..b8733c9ea00e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,13 +1,12 @@ +version: "2" run: - timeout: 10m go: "1.23" build-tags: - tools - e2e allow-parallel-runners: true - linters: - disable-all: true + default: none enable: - asasalint # warns about passing []any to func(...any) without expanding it - asciicheck # non ascii symbols @@ -20,15 +19,11 @@ linters: - durationcheck # multiplying two durations - errcheck # unchecked errors - errchkjson # invalid types passed to json encoder - - gci # ensures imports are organized - ginkgolinter # ginkgo and gomega - gocritic # bugs, performance, style (we could add custom ones to this one) - godot # checks that comments end in a period - - gofmt # warns about incorrect use of fmt functions - - goimports # import formatting - goprintffuncname # printft-like functions should be named with f at the end - gosec # potential security problems - - gosimple # simplify code - govet # basically 'go vet' - importas # consistent import aliases - ineffassign # ineffectual assignments @@ -44,346 +39,357 @@ linters: - predeclared # shadowing predeclared identifiers - revive # better version of golint - staticcheck # some of staticcheck's rules - - stylecheck # another replacement for golint - - tenv # using os.Setenv instead of t.Setenv in tests - thelper # test helpers not starting with t.Helper() - unconvert # unnecessary type conversions - unparam # unused function parameters - unused # unused constants, variables,functions, types - usestdlibvars # using variables/constants from the standard library + - usetesting # report 
function to be replace by testing - whitespace # unnecessary newlines - -linters-settings: - gosec: - excludes: - # integer overflow conversion int -> int32 - - G115 - gci: - sections: - - standard # Standard section: captures all standard packages. - - default # Default section: contains all imports that could not be matched to another section type. - - prefix(sigs.k8s.io/cluster-api) # Custom section: groups all imports with the specified Prefix. - custom-order: true - ginkgolinter: - forbid-focus-container: true - godot: - # declarations - for top level declaration comments (default); - # toplevel - for top level comments; - # all - for all comments. - scope: toplevel - exclude: - - '^ \+.*' - - '^ ANCHOR.*' - - '^ (alpha|beta|GA): v.*' - gocritic: - enabled-tags: - - diagnostic - - experimental - - performance - disabled-checks: - - appendAssign - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - evalOrder - - ifElseChain - - octalLiteral - - regexpSimplify - - sloppyReassign - - truncateCmp - - typeDefFirst - - unnamedResult - - unnecessaryDefer - - whyNoLint - - wrapperFunc - - rangeValCopy - - hugeParam - importas: - no-unaliased: true - alias: - # Kubernetes - - pkg: k8s.io/api/core/v1 - alias: corev1 - - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 - alias: apiextensionsv1 - - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 - alias: metav1 - - pkg: k8s.io/apimachinery/pkg/api/errors - alias: apierrors - - pkg: k8s.io/apimachinery/pkg/util/errors - alias: kerrors - - pkg: k8s.io/component-base/logs/api/v1 - alias: logsv1 - # Controller Runtime - - pkg: sigs.k8s.io/controller-runtime - alias: ctrl - # CABPK - - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha3 - alias: bootstrapv1alpha3 - - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha4 - alias: bootstrapv1alpha4 - - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1 - alias: bootstrapv1beta1 - - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta2 - alias: bootstrapv1 - # KCP - - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3 - alias: controlplanev1alpha3 - - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4 - alias: controlplanev1alpha4 - - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1 - alias: controlplanev1beta1 - - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2 - alias: controlplanev1 - # CAPI - - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha3 - alias: clusterv1alpha3 - - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha4 - alias: clusterv1alpha4 - - pkg: sigs.k8s.io/cluster-api/api/v1beta1 - alias: clusterv1beta1 - - pkg: sigs.k8s.io/cluster-api/api/v1beta2 - alias: clusterv1 - # CAPI exp - - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha3 - alias: expv1alpha3 - - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha4 - alias: expv1alpha4 - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 - alias: expv1beta1 - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta2 - alias: expv1 - # CAPI exp addons - - pkg: sigs.k8s.io/cluster-api/internal/apis/addons/v1alpha3 - alias: addonsv1alpha3 - - pkg: sigs.k8s.io/cluster-api/internal/apis/addons/v1alpha4 - alias: addonsv1alpha4 - - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta1 - alias: addonsv1beta1 - - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta2 - alias: addonsv1 - # CAPI exp IPAM - - pkg: sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1 - alias: ipamv1beta1 - - pkg: 
sigs.k8s.io/cluster-api/exp/ipam/api/v1beta2 - alias: ipamv1 - # CAPI exp runtime - - pkg: sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1 - alias: runtimev1 - - pkg: sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1 - alias: runtimehooksv1 - - pkg: sigs.k8s.io/cluster-api/exp/runtime/controllers - alias: runtimecontrollers - - pkg: sigs.k8s.io/cluster-api/exp/runtime/catalog - alias: runtimecatalog - - pkg: sigs.k8s.io/cluster-api/internal/runtime/client - alias: internalruntimeclient - - pkg: sigs.k8s.io/cluster-api/exp/runtime/client - alias: runtimeclient - - pkg: sigs.k8s.io/cluster-api/internal/runtime/registry - alias: runtimeregistry - - pkg: sigs.k8s.io/cluster-api/internal/webhooks/runtime - alias: runtimewebhooks - # CAPI utils - - pkg: sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1 - alias: v1beta1conditions - - pkg: sigs.k8s.io/cluster-api/internal/topology/names - alias: topologynames - # CAPD - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3 - alias: infrav1alpha3 - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4 - alias: infrav1alpha4 - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1 - alias: infrav1 - # CAPD exp - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3 - alias: infraexpv1alpha3 - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha4 - alias: infraexpv1alpha4 - - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1 - alias: infraexpv1 - nolintlint: - allow-unused: false - require-specific: true - revive: + settings: + ginkgolinter: + forbid-focus-container: true + gocritic: + disabled-checks: + - appendAssign + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - evalOrder + - ifElseChain + - octalLiteral + - regexpSimplify + - sloppyReassign + - truncateCmp + - typeDefFirst + - unnamedResult + - unnecessaryDefer + - whyNoLint + - wrapperFunc + - rangeValCopy + - hugeParam + enabled-tags: + - diagnostic + - experimental + - performance + godot: + # declarations - for top level declaration comments (default); + # toplevel - for top level comments; + # all - for all comments. 
+ scope: toplevel + exclude: + - ^ \+.* + - ^ ANCHOR.* + - '^ (alpha|beta|GA): v.*' + gosec: + excludes: + # integer overflow conversion int -> int32 + - G115 + importas: + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + - pkg: k8s.io/component-base/logs/api/v1 + alias: logsv1 + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + # CABPK + - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha3 + alias: bootstrapv1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/bootstrap/kubeadm/v1alpha4 + alias: bootstrapv1alpha4 + - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1 + alias: bootstrapv1beta1 + - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta2 + alias: bootstrapv1 + # KCP + - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3 + alias: controlplanev1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4 + alias: controlplanev1alpha4 + - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1 + alias: controlplanev1beta1 + - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2 + alias: controlplanev1 + # CAPI + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha3 + alias: clusterv1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/v1alpha4 + alias: clusterv1alpha4 + - pkg: sigs.k8s.io/cluster-api/api/v1beta1 + alias: clusterv1beta1 + - pkg: sigs.k8s.io/cluster-api/api/v1beta2 + alias: clusterv1 + # CAPI exp + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha3 + alias: expv1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/core/exp/v1alpha4 + alias: expv1alpha4 + - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 + alias: expv1beta1 + - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta2 + alias: expv1 + # CAPI exp addons + - pkg: sigs.k8s.io/cluster-api/internal/apis/addons/v1alpha3 + alias: addonsv1alpha3 + - pkg: sigs.k8s.io/cluster-api/internal/apis/addons/v1alpha4 + alias: addonsv1alpha4 + - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta1 + alias: addonsv1beta1 + - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta2 + alias: addonsv1 + # CAPI exp IPAM + - pkg: sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1 + alias: ipamv1beta1 + - pkg: sigs.k8s.io/cluster-api/exp/ipam/api/v1beta2 + alias: ipamv1 + # CAPI exp runtime + - pkg: sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1 + alias: runtimev1 + - pkg: sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1 + alias: runtimehooksv1 + - pkg: sigs.k8s.io/cluster-api/exp/runtime/controllers + alias: runtimecontrollers + - pkg: sigs.k8s.io/cluster-api/exp/runtime/catalog + alias: runtimecatalog + - pkg: sigs.k8s.io/cluster-api/internal/runtime/client + alias: internalruntimeclient + - pkg: sigs.k8s.io/cluster-api/exp/runtime/client + alias: runtimeclient + - pkg: sigs.k8s.io/cluster-api/internal/runtime/registry + alias: runtimeregistry + - pkg: sigs.k8s.io/cluster-api/internal/webhooks/runtime + alias: runtimewebhooks + # CAPI utils + - pkg: sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1 + alias: v1beta1conditions + - pkg: sigs.k8s.io/cluster-api/internal/topology/names + alias: topologynames + # CAPD + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3 + alias: infrav1alpha3 + 
- pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4 + alias: infrav1alpha4 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1 + alias: infrav1 + # CAPD exp + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha3 + alias: infraexpv1alpha3 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1alpha4 + alias: infraexpv1alpha4 + - pkg: sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1 + alias: infraexpv1 + no-unaliased: true + nolintlint: + require-specific: true + allow-unused: false + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr + exclusions: + generated: lax rules: - # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration - - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - - name: if-return - - name: increment-decrement - - name: var-naming - - name: var-declaration - - name: package-comments - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - - name: unreachable-code - - name: redefines-builtin-id - # - # Rules in addition to the recommended configuration above. - # - - name: bool-literal-in-expr - - name: constant-logical-expr - goconst: - ignore-tests: true - + # Specific exclude rules for deprecated fields that are still part of the codebase. These + # should be removed as the referenced deprecated item is removed from the project. + - linters: + - staticcheck + text: 'SA1019: (bootstrapv1.ClusterStatus|KubeadmConfigSpec.UseExperimentalRetryJoin|scope.Config.Spec.UseExperimentalRetryJoin|DockerMachine.Spec.Bootstrapped|machineStatus.Bootstrapped|dockerMachine.Spec.Backend.Docker.Bootstrapped|dockerMachine.Spec.Bootstrapped|devMachine.Spec.Backend.Docker.Bootstrapped|c.TopologyPlan|clusterv1.ClusterClassVariableMetadata|clusterv1beta1.ClusterClassVariableMetadata|(variable|currentDefinition|specVar|newVariableDefinition|statusVarDefinition).Metadata) is deprecated' + # Deprecations forGetIPFamily + - linters: + - staticcheck + text: 'SA1019: cluster.GetIPFamily is deprecated: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. IPFamily will be dropped in a future release. 
More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521' + # Deprecations for MD revision management + - linters: + - staticcheck + text: 'SA1019: ((deployment|m|md).Spec.RevisionHistoryLimit|clusterv1.RevisionHistoryAnnotation|c.RolloutUndo) is deprecated' + # Deprecations for MD revision management + - linters: + - staticcheck + text: 'SA1019: (m|md).Spec.ProgressDeadlineSeconds is deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/issues/11470 for more details.' + # Deprecations for MHC MaxUnhealthy, UnhealthyRange + - linters: + - staticcheck + text: 'SA1019: (mhc|m)(.Spec.MaxUnhealthy|.Spec.UnhealthyRange) is deprecated' + # Specific exclude rules for deprecated packages that are still part of the codebase. These + # should be removed as the referenced deprecated packages are removed from the project. + - linters: + - staticcheck + text: 'SA1019: .* is deprecated: This package will be removed in one of the next releases.' + # Specific exclude rules for deprecated types that are still part of the codebase. These + # should be removed as the referenced deprecated types are removed from the project. + - linters: + - staticcheck + text: 'SA1019: (clusterv1alpha3.*|clusterv1alpha4.*) is deprecated: This type will be removed in one of the next releases.' + # Specific exclude rules for deprecated feature flags + - linters: + - staticcheck + text: 'SA1019: feature.ClusterResourceSet is deprecated: ClusterResourceSet feature is now GA and the corresponding feature flag will be removed in 1.12 release.' + # v1Beta1 deprecated fields + - linters: + - staticcheck + text: 'SA1019: .*\.Deprecated\.V1Beta1.* is deprecated' + - linters: + - revive + text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported' + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + path: fake_\.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + - linters: + - revive + path: cmd/clusterctl/internal/test/providers.*.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + - linters: + - revive + path: (framework|e2e)/.*.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega and ginkgo are allowed + # within test files and test utils. + - linters: + - revive + - staticcheck + path: _test\.go + text: should not use dot imports + - linters: + - revive + - staticcheck + path: (framework|e2e)/.*.go + text: should not use dot imports + # Large parts of this file are duplicate from k/k. Let's ignore "emptyStringTest" to reduce the noise in diffs + # and to avoid making mistakes by diverging from upstream just because of this purely stylistic linter finding. 
+ - linters: + - gocritic + path: internal/topology/variables/clusterclass_variable_validation.go + text: emptyStringTest + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: 'appendAssign: append result not assigned to the same slice' + # Disable linters for conversion + - linters: + - staticcheck + path: .*(api|types)\/.*\/conversion.*\.go$ + text: 'SA1019: in.(.+) is deprecated' + - linters: + - revive + # Ignoring stylistic checks for generated code + path: .*(api|types|test)\/.*\/conversion.*\.go$ + # Checking if an error is nil to just after return the error or nil is redundant + text: 'if-return: redundant if ...; err != nil check, just return error instead' + - linters: + - revive + # Ignoring stylistic checks for generated code + path: .*(api|types|test)\/.*\/conversion.*\.go$ + # Exported function and methods should have comments. This warns on undocumented exported functions and methods. + text: exported (method|function|type|const) (.+) should have comment or be unexported + - linters: + - revive + # Ignoring stylistic checks for generated code + path: .*(api|types|test)\/.*\/conversion.*\.go$ + # This rule warns when initialism, variable or package naming conventions are not followed. + text: 'var-naming: don''t use underscores in Go names;' + - linters: + - revive + # Ignoring stylistic checks for generated code + path: .*(api|types)\/.*\/conversion.*\.go$ + # By convention, receiver names in a method should reflect their identity. + text: 'receiver-naming: receiver name' + - linters: + - staticcheck + path: .*(api|types|test)\/.*\/conversion.*\.go$ + text: 'ST1003: should not use underscores in Go names;' + - linters: + - staticcheck + path: .*(api|types)\/.*\/conversion.*\.go$ + text: 'ST1016: methods on the same type should have the same receiver name' + # We don't care about defer in for loops in test files. + - linters: + - gocritic + path: _test\.go + text: 'deferInLoop: Possible resource leak, ''defer'' is called in the ''for'' loop' + # Ignore non-constant format string in call to condition utils + - linters: + - govet + text: non-constant format string in call to sigs\.k8s\.io\/cluster-api\/util\/conditions\. + - linters: + - goconst + path: (.+)_test\.go + paths: + - zz_generated.*\.go$ + - vendored_openapi\.go$ + - internal/apis/.* + - third_party$ + - builtin$ + - examples$ issues: - exclude-files: - - "zz_generated.*\\.go$" - - "vendored_openapi\\.go$" - # We don't want to invest time to fix new linter findings in old API types. - - "internal/apis/.*" - - max-same-issues: 0 max-issues-per-linter: 0 - # We are disabling default golangci exclusions because we want to help reviewers to focus on reviewing the most relevant - # changes in PRs and avoid nitpicking. - exclude-use-default: false - exclude-rules: - # Specific exclude rules for deprecated fields that are still part of the codebase. These - # should be removed as the referenced deprecated item is removed from the project. 
- - linters: - - staticcheck - text: "SA1019: (bootstrapv1.ClusterStatus|KubeadmConfigSpec.UseExperimentalRetryJoin|scope.Config.Spec.UseExperimentalRetryJoin|DockerMachine.Spec.Bootstrapped|machineStatus.Bootstrapped|dockerMachine.Spec.Backend.Docker.Bootstrapped|devMachine.Spec.Backend.Docker.Bootstrapped|c.TopologyPlan|clusterv1.ClusterClassVariableMetadata|clusterv1beta1.ClusterClassVariableMetadata|(variable|currentDefinition|specVar|newVariableDefinition|statusVarDefinition).Metadata) is deprecated" - # Deprecations forGetIPFamily - - linters: - - staticcheck - text: "SA1019: cluster.GetIPFamily is deprecated: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. IPFamily will be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521" - # Deprecations for MD revision management - - linters: - - staticcheck - text: "SA1019: ((deployment|m|md).Spec.RevisionHistoryLimit|clusterv1.RevisionHistoryAnnotation|c.RolloutUndo) is deprecated" - # Deprecations for MD revision management - - linters: - - staticcheck - text: "SA1019: (m|md).Spec.ProgressDeadlineSeconds is deprecated: This field is deprecated and is going to be removed in the next apiVersion. Please see https://github.com/kubernetes-sigs/cluster-api/issues/11470 for more details." - # Deprecations for MHC MaxUnhealthy, UnhealthyRange - - linters: - - staticcheck - text: "SA1019: (mhc|m)(.Spec.MaxUnhealthy|.Spec.UnhealthyRange) is deprecated" - # v1Beta1 deprecated fields - - linters: - - staticcheck - text: "SA1019: .*\\.Deprecated\\.V1Beta1.* is deprecated" - # Specific exclude rules for deprecated packages that are still part of the codebase. These - # should be removed as the referenced deprecated packages are removed from the project. - - linters: - - staticcheck - text: "SA1019: .* is deprecated: This package will be removed in one of the next releases." - # Specific exclude rules for deprecated types that are still part of the codebase. These - # should be removed as the referenced deprecated types are removed from the project. - - linters: - - staticcheck - text: "SA1019: (clusterv1alpha3.*|clusterv1alpha4.*) is deprecated: This type will be removed in one of the next releases." - # Specific exclude rules for deprecated feature flags - - linters: - - staticcheck - text: "SA1019: feature.ClusterResourceSet is deprecated: ClusterResourceSet feature is now GA and the corresponding feature flag will be removed in 1.12 release." - - linters: - - revive - text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" - - linters: - - errcheck - text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked - # Exclude some packages or code to require comments, for example test code, or fake clients. 
- - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - source: (func|type).*Fake.* - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: fake_\.go - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: cmd/clusterctl/internal/test/providers.*.go - - linters: - - revive - text: exported (method|function|type|const) (.+) should have comment or be unexported - path: "(framework|e2e)/.*.go" - # Disable unparam "always receives" which might not be really - # useful when building libraries. - - linters: - - unparam - text: always receives - # Dot imports for gomega and ginkgo are allowed - # within test files and test utils. - - linters: - - revive - - stylecheck - path: _test\.go - text: should not use dot imports - - linters: - - revive - - stylecheck - path: (framework|e2e)/.*.go - text: should not use dot imports - # Large parts of this file are duplicate from k/k. Let's ignore "emptyStringTest" to reduce the noise in diffs - # and to avoid making mistakes by diverging from upstream just because of this purely stylistic linter finding. - - linters: - - gocritic - text: "emptyStringTest" - path: internal/topology/variables/clusterclass_variable_validation.go - # Append should be able to assign to a different var/slice. - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - # Disable linters for conversion - - linters: - - staticcheck - text: "SA1019: in.(.+) is deprecated" - path: .*(api|types)\/.*\/conversion.*\.go$ - - linters: - - revive - # Checking if an error is nil to just after return the error or nil is redundant - text: "if-return: redundant if ...; err != nil check, just return error instead" - # Ignoring stylistic checks for generated code - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - revive - # Exported function and methods should have comments. This warns on undocumented exported functions and methods. - text: exported (method|function|type|const) (.+) should have comment or be unexported - # Ignoring stylistic checks for generated code - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - revive - # This rule warns when initialism, variable or package naming conventions are not followed. - text: "var-naming: don't use underscores in Go names;" - # Ignoring stylistic checks for generated code - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - revive - # By convention, receiver names in a method should reflect their identity. - text: "receiver-naming: receiver name" - # Ignoring stylistic checks for generated code - path: .*(api|types)\/.*\/conversion.*\.go$ - - linters: - - stylecheck - text: "ST1003: should not use underscores in Go names;" - path: .*(api|types|test)\/.*\/conversion.*\.go$ - - linters: - - stylecheck - text: "ST1016: methods on the same type should have the same receiver name" - path: .*(api|types)\/.*\/conversion.*\.go$ - # We don't care about defer in for loops in test files. - - linters: - - gocritic - text: "deferInLoop: Possible resource leak, 'defer' is called in the 'for' loop" - path: _test\.go - # Ignore non-constant format string in call to condition utils - - linters: - - govet - text: "non-constant format string in call to sigs\\.k8s\\.io\\/cluster-api\\/util\\/conditions\\/deprecated\\/v1beta1\\." 
+ max-same-issues: 0 +formatters: + enable: + - gci # ensures imports are organized + - gofmt # warns about incorrect use of fmt functions + - goimports # import formatting + settings: + gci: + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(sigs.k8s.io/cluster-api) # Custom section: groups all imports with the specified Prefix. + custom-order: true + exclusions: + generated: lax + paths: + - zz_generated.*\.go$ + - vendored_openapi\.go$ + # We don't want to invest time to fix new linter findings in old API types. + - internal/apis/.* diff --git a/Makefile b/Makefile index 893d5d00cf8d..2a99af7d4be4 100644 --- a/Makefile +++ b/Makefile @@ -162,7 +162,7 @@ GINKGO_PKG := github.com/onsi/ginkgo/v2/ginkgo GOLANGCI_LINT_BIN := golangci-lint GOLANGCI_LINT_VER := $(shell cat .github/workflows/pr-golangci-lint.yaml | grep [[:space:]]version: | sed 's/.*version: //') GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)) -GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint +GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/v2/cmd/golangci-lint GOLANGCI_LINT_KAL_BIN := golangci-lint-kube-api-linter GOLANGCI_LINT_KAL_VER := $(shell cat ./hack/tools/.custom-gcl.yaml | grep version: | sed 's/version: //') diff --git a/bootstrap/kubeadm/internal/cloudinit/controlplane_init.go b/bootstrap/kubeadm/internal/cloudinit/controlplane_init.go index a7412a7a8a54..5ce131b34561 100644 --- a/bootstrap/kubeadm/internal/cloudinit/controlplane_init.go +++ b/bootstrap/kubeadm/internal/cloudinit/controlplane_init.go @@ -60,7 +60,7 @@ type ControlPlaneInput struct { // NewInitControlPlane returns the user data string to be used on a controlplane instance. func NewInitControlPlane(input *ControlPlaneInput) ([]byte, error) { input.Header = cloudConfigHeader - input.WriteFiles = input.Certificates.AsFiles() + input.WriteFiles = input.AsFiles() input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) input.SentinelFileCommand = sentinelFileCommand userData, err := generate("InitControlplane", controlPlaneCloudInit, input) diff --git a/bootstrap/kubeadm/internal/cloudinit/controlplane_join.go b/bootstrap/kubeadm/internal/cloudinit/controlplane_join.go index 7cbd5fd00952..127a9047f1b3 100644 --- a/bootstrap/kubeadm/internal/cloudinit/controlplane_join.go +++ b/bootstrap/kubeadm/internal/cloudinit/controlplane_join.go @@ -58,7 +58,7 @@ type ControlPlaneJoinInput struct { // NewJoinControlPlane returns the user data string to be used on a new control plane instance. func NewJoinControlPlane(input *ControlPlaneJoinInput) ([]byte, error) { // TODO: Consider validating that the correct certificates exist. 
It is different for external/stacked etcd - input.WriteFiles = input.Certificates.AsFiles() + input.WriteFiles = input.AsFiles() input.ControlPlane = true if err := input.prepare(); err != nil { return nil, err diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 9e87a6770529..77cee08d1d57 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -302,9 +302,9 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c Message: "Waiting for Cluster status.infrastructureReady to be true", }) return ctrl.Result{}, nil - // Reconcile status for machines that already have a secret reference, but our status isn't up to date. - // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. - case configOwner.DataSecretName() != nil && (!(config.Status.Initialization != nil && config.Status.Initialization.DataSecretCreated) || config.Status.DataSecretName == nil): + // Reconcile status for machines that already have a secret reference, but our status isn't up to date. + // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. + case configOwner.DataSecretName() != nil && (config.Status.Initialization == nil || !config.Status.Initialization.DataSecretCreated || config.Status.DataSecretName == nil): if config.Status.Initialization == nil { config.Status.Initialization = &bootstrapv1.KubeadmConfigInitializationStatus{} } diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index cd0f540a8556..e2c019ab61b0 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -2656,7 +2656,7 @@ func addKubeadmConfigToMachine(config *bootstrapv1.KubeadmConfig, machine *clust if machine == nil { panic("no machine passed to function") } - config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + config.OwnerReferences = []metav1.OwnerReference{ { Kind: "Machine", APIVersion: clusterv1.GroupVersion.String(), @@ -2678,7 +2678,7 @@ func addKubeadmConfigToMachinePool(config *bootstrapv1.KubeadmConfig, machinePoo if machinePool == nil { panic("no machinePool passed to function") } - config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + config.OwnerReferences = []metav1.OwnerReference{ { Kind: "MachinePool", APIVersion: expv1.GroupVersion.String(), diff --git a/bootstrap/kubeadm/internal/ignition/ignition.go b/bootstrap/kubeadm/internal/ignition/ignition.go index bc1436bad529..f936563d2def 100644 --- a/bootstrap/kubeadm/internal/ignition/ignition.go +++ b/bootstrap/kubeadm/internal/ignition/ignition.go @@ -79,7 +79,7 @@ func NewJoinControlPlane(input *ControlPlaneJoinInput) ([]byte, string, error) { return nil, "", fmt.Errorf("controlplane join input can't be nil") } - input.WriteFiles = input.Certificates.AsFiles() + input.WriteFiles = input.AsFiles() input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) 
input.KubeadmCommand = fmt.Sprintf(kubeadmCommandTemplate, joinSubcommand, input.KubeadmVerbosity) @@ -96,7 +96,7 @@ func NewInitControlPlane(input *ControlPlaneInput) ([]byte, string, error) { return nil, "", fmt.Errorf("controlplane input can't be nil") } - input.WriteFiles = input.Certificates.AsFiles() + input.WriteFiles = input.AsFiles() input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) input.KubeadmCommand = fmt.Sprintf(kubeadmCommandTemplate, initSubcommand, input.KubeadmVerbosity) diff --git a/bootstrap/kubeadm/types/upstreamv1beta4/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta4/bootstraptokenstring.go index 584da3bdf6bd..b677e0787667 100644 --- a/bootstrap/kubeadm/types/upstreamv1beta4/bootstraptokenstring.go +++ b/bootstrap/kubeadm/types/upstreamv1beta4/bootstraptokenstring.go @@ -59,7 +59,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { } // Remove unnecessary " characters coming from the JSON parser - token := strings.Replace(string(b), `"`, ``, -1) + token := strings.ReplaceAll(string(b), `"`, ``) // Convert the string Token to a BootstrapTokenString object newbts, err := NewBootstrapTokenString(token) if err != nil { diff --git a/cmd/clusterctl/client/alpha/machinedeployment.go b/cmd/clusterctl/client/alpha/machinedeployment.go index d25f0f6e2d67..25c7ccbdaa7d 100644 --- a/cmd/clusterctl/client/alpha/machinedeployment.go +++ b/cmd/clusterctl/client/alpha/machinedeployment.go @@ -91,7 +91,8 @@ func findMachineDeploymentRevision(toRevision int64, allMSs []*clusterv1.Machine ) for _, ms := range allMSs { if v, err := revision(ms); err == nil { - if toRevision == 0 { + switch toRevision { + case 0: if latestRevision < v { // newest one we've seen so far previousRevision = latestRevision @@ -103,7 +104,7 @@ func findMachineDeploymentRevision(toRevision int64, allMSs []*clusterv1.Machine previousRevision = v previousMachineSet = ms } - } else if toRevision == v { + case v: return ms, nil } } diff --git a/cmd/clusterctl/client/alpha/rollout_resumer.go b/cmd/clusterctl/client/alpha/rollout_resumer.go index 69c5dc34386b..cc9e23fac313 100644 --- a/cmd/clusterctl/client/alpha/rollout_resumer.go +++ b/cmd/clusterctl/client/alpha/rollout_resumer.go @@ -72,7 +72,7 @@ func resumeMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, nam // resumeKubeadmControlPlane removes paused annotation. func resumeKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { // In the paused annotation we must replace slashes to ~1, see https://datatracker.ietf.org/doc/html/rfc6901#section-3. 
- pausedAnnotation := strings.Replace(clusterv1.PausedAnnotation, "/", "~1", -1) + pausedAnnotation := strings.ReplaceAll(clusterv1.PausedAnnotation, "/", "~1") patch := client.RawPatch(types.JSONPatchType, []byte(fmt.Sprintf("[{\"op\": \"remove\", \"path\": \"/metadata/annotations/%s\"}]", pausedAnnotation))) return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index 48a8d76a7db0..4565146609d0 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -347,7 +347,7 @@ func (cm *certManagerClient) shouldUpgrade(desiredVersion string, objs, installO // the number of objects when version of objects are equal relevantObjs := []unstructured.Unstructured{} for _, o := range objs { - if !(o.GetKind() == "Endpoints" || o.GetKind() == "EndpointSlice") { + if o.GetKind() != "Endpoints" && o.GetKind() != "EndpointSlice" { relevantObjs = append(relevantObjs, o) } } diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go index 47d0969da51a..9cd94d246847 100644 --- a/cmd/clusterctl/client/cluster/components_test.go +++ b/cmd/clusterctl/client/cluster/components_test.go @@ -522,7 +522,7 @@ func Test_providerComponents_ValidateNoObjectsExist(t *testing.T) { }, }, } - crd.ObjectMeta.Labels[clusterctlv1.ClusterctlLabel] = "" + crd.Labels[clusterctlv1.ClusterctlLabel] = "" cr := &unstructured.Unstructured{} cr.SetAPIVersion("some.group/v1") diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index b3aa289f17a7..a5b0b57509c0 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -726,8 +726,8 @@ var backupRestoreTests = []struct { } func fixFilesGVS(file string) string { - s := strings.Replace(file, "$CAPI", clusterv1.GroupVersion.String(), -1) - return strings.Replace(s, "$INFRA", clusterv1.GroupVersionInfrastructure.String(), -1) + s := strings.ReplaceAll(file, "$CAPI", clusterv1.GroupVersion.String()) + return strings.ReplaceAll(s, "$INFRA", clusterv1.GroupVersionInfrastructure.String()) } func Test_objectMover_backupTargetObject(t *testing.T) { @@ -752,14 +752,10 @@ func Test_objectMover_backupTargetObject(t *testing.T) { fromProxy: graph.proxy, } - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() for _, node := range graph.uidToNode { - err = mover.backupTargetObject(ctx, node, dir) + err := mover.backupTargetObject(ctx, node, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -823,12 +819,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { ctx := context.Background() - // temporary directory - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - g.Expect(err).ToNot(HaveOccurred()) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraph() @@ -952,13 +943,9 @@ func Test_objectMover_toDirectory(t *testing.T) { fromProxy: graph.proxy, } - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() - err = mover.toDirectory(ctx, graph, dir) + err := mover.toDirectory(ctx, graph, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1013,11 +1000,7 @@ func Test_objectMover_filesToObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() for _, fileName := range tt.files { path := filepath.Join(dir, fileName) @@ -1074,12 +1057,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { ctx := context.Background() - // temporary directory - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - g.Expect(err).ToNot(HaveOccurred()) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraph() diff --git a/cmd/clusterctl/client/cluster/proxy_test.go b/cmd/clusterctl/client/cluster/proxy_test.go index 73cafb83c883..f50910dea7df 100644 --- a/cmd/clusterctl/client/cluster/proxy_test.go +++ b/cmd/clusterctl/client/cluster/proxy_test.go @@ -64,9 +64,7 @@ func TestProxyGetConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(tt.kubeconfigContents), 0600)).To(Succeed()) @@ -119,9 +117,7 @@ func TestProxyGetConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfig("management", "default")), 0600)).To(Succeed()) @@ -147,9 +143,7 @@ func TestKUBECONFIGEnvVar(t *testing.T) { ) g := NewWithT(t) - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfigContents), 0600)).To(Succeed()) @@ -175,9 +169,7 @@ func TestKUBECONFIGEnvVar(t *testing.T) { expectedHost = "https://kind-server:38790" ) g := NewWithT(t) - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile := filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(kubeconfigContents), 0600)).To(Succeed()) @@ -252,9 +244,7 @@ func TestProxyCurrentNamespace(t *testing.T) { if tt.kubeconfigPath != "" { configFile = tt.kubeconfigPath } else { - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile = filepath.Join(dir, ".test-kubeconfig.yaml") g.Expect(os.WriteFile(configFile, []byte(tt.kubeconfigContents), 0600)).To(Succeed()) } diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go index 79c6a809711f..c1042fafa9c5 100644 --- 
a/cmd/clusterctl/client/cluster/template_test.go +++ b/cmd/clusterctl/client/cluster/template_test.go @@ -279,9 +279,7 @@ func Test_templateClient_getRawUrlFileContent(t *testing.T) { func Test_templateClient_getLocalFileContent(t *testing.T) { g := NewWithT(t) - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() path := filepath.Join(tmpDir, "cluster-template.yaml") g.Expect(os.WriteFile(path, []byte(template), 0600)).To(Succeed()) @@ -333,9 +331,7 @@ func Test_templateClient_getLocalFileContent(t *testing.T) { func Test_templateClient_GetFromURL(t *testing.T) { g := NewWithT(t) - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 156bc76a7a7d..0eea2dd863df 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -371,9 +371,9 @@ func (u *providerUpgrader) doUpgrade(ctx context.Context, upgradePlan *UpgradePl // rules for out of tree providers. minVersionSkew := semver.MustParse("1.10.0") for _, upgradeItem := range upgradePlan.Providers { - if !(upgradeItem.Type == string(clusterctlv1.CoreProviderType) || - (upgradeItem.Type == string(clusterctlv1.BootstrapProviderType) && upgradeItem.ProviderName == config.KubeadmBootstrapProviderName) || - (upgradeItem.Type == string(clusterctlv1.ControlPlaneProviderType) && upgradeItem.ProviderName == config.KubeadmControlPlaneProviderName)) { + if upgradeItem.Type != string(clusterctlv1.CoreProviderType) && + (upgradeItem.Type != string(clusterctlv1.BootstrapProviderType) || upgradeItem.ProviderName != config.KubeadmBootstrapProviderName) && + (upgradeItem.Type != string(clusterctlv1.ControlPlaneProviderType) || upgradeItem.ProviderName != config.KubeadmControlPlaneProviderName) { continue } diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go index 02bb984c5876..241045cc210c 100644 --- a/cmd/clusterctl/client/config/reader_viper.go +++ b/cmd/clusterctl/client/config/reader_viper.go @@ -93,8 +93,8 @@ func (v *viperReader) Init(ctx context.Context, path string) error { return errors.Wrap(err, "failed to url parse the config path") } - switch { - case url.Scheme == "https" || url.Scheme == "http": + switch url.Scheme { + case "https", "http": var configDirectory string if len(v.configPaths) > 0 { configDirectory = v.configPaths[0] diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go index aca8f9af4d89..e6613be1e9de 100644 --- a/cmd/clusterctl/client/config/reader_viper_test.go +++ b/cmd/clusterctl/client/config/reader_viper_test.go @@ -34,13 +34,9 @@ func Test_viperReader_Init(t *testing.T) { // Change HOME dir and do not specify config file // (.cluster-api/clusterctl) in it. 
- clusterctlHomeDir, err := os.MkdirTemp("", "clusterctl-default") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(clusterctlHomeDir) + clusterctlHomeDir := t.TempDir() - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() configFile := filepath.Join(dir, "clusterctl.yaml") g.Expect(os.WriteFile(configFile, []byte("bar: bar"), 0600)).To(Succeed()) @@ -256,10 +252,7 @@ func Test_viperReader_Set(t *testing.T) { func Test_viperReader_checkDefaultConfig(t *testing.T) { g := NewWithT(t) - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) - dir = strings.TrimSuffix(dir, "/") + dir := strings.TrimSuffix(t.TempDir(), "/") configFile := filepath.Join(dir, "clusterctl.yaml") g.Expect(os.WriteFile(configFile, []byte("bar: bar"), 0600)).To(Succeed()) diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index b85cac9547e9..b39c58be4f63 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -524,10 +524,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") - // Template on a file - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() path := filepath.Join(tmpDir, "cluster-template.yaml") g.Expect(os.WriteFile(path, rawTemplate, 0600)).To(Succeed()) @@ -744,10 +741,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") - // Template on a file - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() path := filepath.Join(tmpDir, "cluster-template.yaml") g.Expect(os.WriteFile(path, rawTemplate, 0600)).To(Succeed()) @@ -1014,9 +1008,7 @@ func Test_clusterctlClient_ProcessYAML(t *testing.T) { template := `v1: ${VAR1:=default1} v2: ${VAR2=default2} v3: ${VAR3:-default3}` - dir, err := os.MkdirTemp("", "clusterctl") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(dir) + dir := t.TempDir() templateFile := filepath.Join(dir, "template.yaml") g.Expect(os.WriteFile(templateFile, []byte(template), 0600)).To(Succeed()) diff --git a/cmd/clusterctl/client/move_test.go b/cmd/clusterctl/client/move_test.go index dfd4287f55fd..42998329ae7c 100644 --- a/cmd/clusterctl/client/move_test.go +++ b/cmd/clusterctl/client/move_test.go @@ -18,7 +18,6 @@ package client import ( "context" - "os" "testing" . 
"github.com/onsi/gomega" @@ -142,11 +141,7 @@ func Test_clusterctlClient_Move(t *testing.T) { } func Test_clusterctlClient_ToDirectory(t *testing.T) { - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() type fields struct { client *fakeClient @@ -207,11 +202,7 @@ func Test_clusterctlClient_ToDirectory(t *testing.T) { } func Test_clusterctlClient_FromDirectory(t *testing.T) { - dir, err := os.MkdirTemp("/tmp", "cluster-api") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() type fields struct { client *fakeClient diff --git a/cmd/clusterctl/client/repository/repository_local.go b/cmd/clusterctl/client/repository/repository_local.go index 73bb85a70eae..ad00ebd7a6fa 100644 --- a/cmd/clusterctl/client/repository/repository_local.go +++ b/cmd/clusterctl/client/repository/repository_local.go @@ -86,12 +86,13 @@ func (r *localRepository) ComponentsPath() string { func (r *localRepository) GetFile(ctx context.Context, version, fileName string) ([]byte, error) { var err error - if version == latestVersionTag { + switch version { + case latestVersionTag: version, err = latestRelease(ctx, r) if err != nil { return nil, errors.Wrapf(err, "failed to get the latest release") } - } else if version == "" { + case "": version = r.defaultVersion } diff --git a/cmd/clusterctl/client/tree/tree.go b/cmd/clusterctl/client/tree/tree.go index 6d080a0556eb..92d4c3cf37cd 100644 --- a/cmd/clusterctl/client/tree/tree.go +++ b/cmd/clusterctl/client/tree/tree.go @@ -387,7 +387,7 @@ func minLastTransitionTime(a, b *metav1.Condition) metav1.Time { if a == nil { return b.LastTransitionTime } - if a.LastTransitionTime.Time.After(b.LastTransitionTime.Time) { + if a.LastTransitionTime.After(b.LastTransitionTime.Time) { return b.LastTransitionTime } return a.LastTransitionTime @@ -434,7 +434,7 @@ func minLastTransitionTimeV1Beta1(a, b *clusterv1.Condition) metav1.Time { if a == nil { return b.LastTransitionTime } - if a.LastTransitionTime.Time.After(b.LastTransitionTime.Time) { + if a.LastTransitionTime.After(b.LastTransitionTime.Time) { return b.LastTransitionTime } return a.LastTransitionTime diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index 058eaf4299de..aea2ab655207 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -314,7 +314,7 @@ func Test_hasSameReadyStatusSeverityAndReason(t *testing.T) { func Test_minLastTransitionTime(t *testing.T) { now := &metav1.Condition{Type: "now", LastTransitionTime: metav1.Now()} - beforeNow := &metav1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Time.Add(-1 * time.Hour)}} + beforeNow := &metav1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Add(-1 * time.Hour)}} type args struct { a *metav1.Condition b *metav1.Condition @@ -377,7 +377,7 @@ func Test_minLastTransitionTime(t *testing.T) { func Test_minLastTransitionTimeV1Beta1(t *testing.T) { now := &clusterv1.Condition{Type: "now", LastTransitionTime: metav1.Now()} - beforeNow := &clusterv1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Time.Add(-1 * time.Hour)}} + beforeNow := &clusterv1.Condition{Type: "beforeNow", LastTransitionTime: metav1.Time{Time: now.LastTransitionTime.Add(-1 * time.Hour)}} type args struct { a *clusterv1.Condition b *clusterv1.Condition @@ -582,7 +582,7 @@ func 
Test_createGroupNode(t *testing.T) { func Test_createV1Beta1GroupNode(t *testing.T) { now := metav1.Now() - beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} + beforeNow := metav1.Time{Time: now.Add(-1 * time.Hour)} obj := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ @@ -664,7 +664,7 @@ func Test_createV1Beta1GroupNode(t *testing.T) { func Test_updateGroupNode(t *testing.T) { now := metav1.Now() - beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} + beforeNow := metav1.Time{Time: now.Add(-1 * time.Hour)} group := &NodeObject{ TypeMeta: metav1.TypeMeta{ @@ -745,7 +745,7 @@ func Test_updateGroupNode(t *testing.T) { func Test_updateV1Beta1GroupNode(t *testing.T) { now := metav1.Now() - beforeNow := metav1.Time{Time: now.Time.Add(-1 * time.Hour)} + beforeNow := metav1.Time{Time: now.Add(-1 * time.Hour)} group := &NodeObject{ TypeMeta: metav1.TypeMeta{ diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go index 85634a680b5c..109108ff591c 100644 --- a/cmd/clusterctl/cmd/config_repositories_test.go +++ b/cmd/clusterctl/cmd/config_repositories_test.go @@ -30,9 +30,7 @@ func Test_runGetRepositories(t *testing.T) { t.Run("prints output", func(t *testing.T) { g := NewWithT(t) - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() path := filepath.Join(tmpDir, "clusterctl.yaml") g.Expect(os.WriteFile(path, []byte(template), 0600)).To(Succeed()) @@ -47,9 +45,10 @@ func Test_runGetRepositories(t *testing.T) { // Use gomega's BeComparableTo as opposed to Equals to compare output which uses gocmp under // the hood and correctly prints any differences between the two strings. - if val == RepositoriesOutputText { + switch val { + case RepositoriesOutputText: g.Expect(string(out)).To(BeComparableTo(expectedOutputText)) - } else if val == RepositoriesOutputYaml { + case RepositoriesOutputYaml: g.Expect(string(out)).To(BeComparableTo(expectedOutputYaml)) } } @@ -69,9 +68,7 @@ func Test_runGetRepositories(t *testing.T) { t.Run("returns error for bad template", func(t *testing.T) { g := NewWithT(t) - tmpDir, err := os.MkdirTemp("", "cc") - g.Expect(err).ToNot(HaveOccurred()) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() path := filepath.Join(tmpDir, "clusterctl.yaml") g.Expect(os.WriteFile(path, []byte("providers: foobar"), 0600)).To(Succeed()) diff --git a/cmd/clusterctl/cmd/root.go b/cmd/clusterctl/cmd/root.go index d5b6b8e417f3..aab9f3460396 100644 --- a/cmd/clusterctl/cmd/root.go +++ b/cmd/clusterctl/cmd/root.go @@ -319,7 +319,7 @@ func handlePluginCommand(pluginHandler pluginHandler, cmdArgs []string, minArgs if strings.HasPrefix(arg, "-") { break } - remainingArgs = append(remainingArgs, strings.Replace(arg, "-", "_", -1)) + remainingArgs = append(remainingArgs, strings.ReplaceAll(arg, "-", "_")) } if len(remainingArgs) == 0 { diff --git a/cmd/clusterctl/cmd/upgrade.go b/cmd/clusterctl/cmd/upgrade.go index 7721d8a04c8e..7458e22395fa 100644 --- a/cmd/clusterctl/cmd/upgrade.go +++ b/cmd/clusterctl/cmd/upgrade.go @@ -42,9 +42,9 @@ func init() { func sortUpgradeItems(plan client.UpgradePlan) { sort.Slice(plan.Providers, func(i, j int) bool { - return plan.Providers[i].Provider.Type < plan.Providers[j].Provider.Type || - (plan.Providers[i].Provider.Type == plan.Providers[j].Provider.Type && plan.Providers[i].Provider.Name < plan.Providers[j].Provider.Name) || - (plan.Providers[i].Provider.Type == plan.Providers[j].Provider.Type && 
plan.Providers[i].Provider.Name == plan.Providers[j].Provider.Name && plan.Providers[i].Provider.Namespace < plan.Providers[j].Provider.Namespace) + return plan.Providers[i].Type < plan.Providers[j].Type || + (plan.Providers[i].Type == plan.Providers[j].Type && plan.Providers[i].Name < plan.Providers[j].Name) || + (plan.Providers[i].Type == plan.Providers[j].Type && plan.Providers[i].Name == plan.Providers[j].Name && plan.Providers[i].Namespace < plan.Providers[j].Namespace) }) } diff --git a/cmd/clusterctl/cmd/upgrade_plan.go b/cmd/clusterctl/cmd/upgrade_plan.go index 0709bc742c0b..bb0fe4b1fd6f 100644 --- a/cmd/clusterctl/cmd/upgrade_plan.go +++ b/cmd/clusterctl/cmd/upgrade_plan.go @@ -116,7 +116,7 @@ func runUpgradePlan() error { w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0) fmt.Fprintln(w, "NAME\tNAMESPACE\tTYPE\tCURRENT VERSION\tNEXT VERSION") for _, upgradeItem := range plan.Providers { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", upgradeItem.Provider.Name, upgradeItem.Provider.Namespace, upgradeItem.Provider.Type, upgradeItem.Provider.Version, prettifyTargetVersion(upgradeItem.NextVersion)) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", upgradeItem.Name, upgradeItem.Namespace, upgradeItem.Type, upgradeItem.Version, prettifyTargetVersion(upgradeItem.NextVersion)) if upgradeItem.NextVersion != "" { upgradeAvailable = true } diff --git a/cmd/clusterctl/internal/test/fake_objects.go b/cmd/clusterctl/internal/test/fake_objects.go index 9a411a3be2ae..14e9d8994e41 100644 --- a/cmd/clusterctl/internal/test/fake_objects.go +++ b/cmd/clusterctl/internal/test/fake_objects.go @@ -570,6 +570,9 @@ func NewStaticBootstrapConfig(name string) *clusterv1.Bootstrap { } } +// NewBootstrapConfigTemplate returns a clusterv1.Bootstrap where +// - the ConfigRef is set to the provided machineBootstrapTemplate +// - the DataSecretName is nil. func NewBootstrapConfigTemplate(machineBootstrapTemplate *fakebootstrap.GenericBootstrapConfigTemplate) *clusterv1.Bootstrap { return &clusterv1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ @@ -581,6 +584,9 @@ func NewBootstrapConfigTemplate(machineBootstrapTemplate *fakebootstrap.GenericB } } +// NewBootstrapConfig returns a clusterv1.Bootstrap where +// - the ConfigRef is set to the provided machineBootstrap +// - the DataSecretName is nil.
func NewBootstrapConfig(machineBootstrap *fakebootstrap.GenericBootstrapConfig) *clusterv1.Bootstrap { return &clusterv1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ diff --git a/controllers/clustercache/cluster_accessor_test.go b/controllers/clustercache/cluster_accessor_test.go index f86ad3648ddf..54bda7cfa752 100644 --- a/controllers/clustercache/cluster_accessor_test.go +++ b/controllers/clustercache/cluster_accessor_test.go @@ -56,8 +56,8 @@ func TestConnect(t *testing.T) { g.Expect(env.CreateAndWait(ctx, testCluster)).To(Succeed()) defer func() { g.Expect(env.CleanupAndWait(ctx, testCluster)).To(Succeed()) }() - config := buildClusterAccessorConfig(env.Manager.GetScheme(), Options{ - SecretClient: env.Manager.GetClient(), + config := buildClusterAccessorConfig(env.GetScheme(), Options{ + SecretClient: env.GetClient(), Client: ClientOptions{ UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"), Timeout: 10 * time.Second, @@ -157,8 +157,8 @@ func TestDisconnect(t *testing.T) { g.Expect(env.CreateAndWait(ctx, kubeconfigSecret)).To(Succeed()) defer func() { g.Expect(env.CleanupAndWait(ctx, kubeconfigSecret)).To(Succeed()) }() - config := buildClusterAccessorConfig(env.Manager.GetScheme(), Options{ - SecretClient: env.Manager.GetClient(), + config := buildClusterAccessorConfig(env.GetScheme(), Options{ + SecretClient: env.GetClient(), Client: ClientOptions{ UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"), Timeout: 10 * time.Second, @@ -317,8 +317,8 @@ func TestWatch(t *testing.T) { g.Expect(env.CreateAndWait(ctx, kubeconfigSecret)).To(Succeed()) defer func() { g.Expect(env.CleanupAndWait(ctx, kubeconfigSecret)).To(Succeed()) }() - config := buildClusterAccessorConfig(env.Manager.GetScheme(), Options{ - SecretClient: env.Manager.GetClient(), + config := buildClusterAccessorConfig(env.GetScheme(), Options{ + SecretClient: env.GetClient(), Client: ClientOptions{ UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"), Timeout: 10 * time.Second, diff --git a/controllers/clustercache/cluster_cache_test.go b/controllers/clustercache/cluster_cache_test.go index a7fc28a879e5..49c0d4208688 100644 --- a/controllers/clustercache/cluster_cache_test.go +++ b/controllers/clustercache/cluster_cache_test.go @@ -62,7 +62,7 @@ func TestReconcile(t *testing.T) { defer func() { g.Expect(client.IgnoreNotFound(env.CleanupAndWait(ctx, testCluster))).To(Succeed()) }() opts := Options{ - SecretClient: env.Manager.GetClient(), + SecretClient: env.GetClient(), Client: ClientOptions{ UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"), Timeout: 10 * time.Second, @@ -71,10 +71,10 @@ func TestReconcile(t *testing.T) { Indexes: []CacheOptionsIndex{NodeProviderIDIndex}, }, } - accessorConfig := buildClusterAccessorConfig(env.Manager.GetScheme(), opts, nil) + accessorConfig := buildClusterAccessorConfig(env.GetScheme(), opts, nil) cc := &clusterCache{ // Use APIReader to avoid cache issues when reading the Cluster object. - client: env.Manager.GetAPIReader(), + client: env.GetAPIReader(), clusterAccessorConfig: accessorConfig, clusterAccessors: make(map[client.ObjectKey]*clusterAccessor), cacheCtx: context.Background(), @@ -517,7 +517,7 @@ func TestClusterCacheConcurrency(t *testing.T) { // Set up ClusterCache. 
cc, err := SetupWithManager(ctx, env.Manager, Options{ - SecretClient: env.Manager.GetClient(), + SecretClient: env.GetClient(), Cache: CacheOptions{ Indexes: []CacheOptionsIndex{NodeProviderIDIndex}, }, diff --git a/controllers/crdmigrator/crd_migrator.go b/controllers/crdmigrator/crd_migrator.go index 149dc1da4892..0998e7d3cd04 100644 --- a/controllers/crdmigrator/crd_migrator.go +++ b/controllers/crdmigrator/crd_migrator.go @@ -563,5 +563,5 @@ type objectEntry struct { } func (r objectEntry) Key() string { - return fmt.Sprintf("%s %s %d", r.Kind, r.ObjectKey.String(), r.CRDGeneration) + return fmt.Sprintf("%s %s %d", r.Kind, r.String(), r.CRDGeneration) } diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index 129eea48e4cc..ba7f46c532d7 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -75,7 +75,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { go func() { g.Expect(mgr.Start(mgrContext)).To(Succeed()) }() - <-env.Manager.Elected() + <-env.Elected() k8sClient = mgr.GetClient() diff --git a/controllers/remote/cluster_cache_reconciler_test.go b/controllers/remote/cluster_cache_reconciler_test.go index 4180d7844ce4..0703369c7f93 100644 --- a/controllers/remote/cluster_cache_reconciler_test.go +++ b/controllers/remote/cluster_cache_reconciler_test.go @@ -104,7 +104,7 @@ func TestClusterCacheReconciler(t *testing.T) { go func() { g.Expect(mgr.Start(mgrContext)).To(Succeed()) }() - <-env.Manager.Elected() + <-env.Elected() k8sClient = mgr.GetClient() diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go index 7256543bf2c5..283376eeabe3 100644 --- a/controlplane/kubeadm/internal/cluster_test.go +++ b/controlplane/kubeadm/internal/cluster_test.go @@ -212,7 +212,7 @@ func TestGetWorkloadCluster(t *testing.T) { } clusterCache, err := clustercache.SetupWithManager(ctx, env.Manager, clustercache.Options{ - SecretClient: env.Manager.GetClient(), + SecretClient: env.GetClient(), Client: clustercache.ClientOptions{ UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"), Cache: clustercache.ClientCacheOptions{ @@ -304,7 +304,7 @@ func machineListForTestGetMachinesForCluster() *clusterv1.MachineList { } } controlPlaneMachine := machine("first-machine") - controlPlaneMachine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "" + controlPlaneMachine.Labels[clusterv1.MachineControlPlaneLabel] = "" controlPlaneMachine.OwnerReferences = ownedRef return &clusterv1.MachineList{ diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 5fe4b4f44ace..c24e26c8614d 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -427,10 +427,10 @@ func (c *ControlPlane) StatusToLogKeyAndValues(newMachine, deletedMachine *clust // TODO (v1beta2): test for v1beta2 conditions for _, condition := range controlPlaneMachineHealthConditions { if v1beta1conditions.IsUnknown(m, condition) { - notes = append(notes, strings.Replace(string(condition), "Healthy", " health unknown", -1)) + notes = append(notes, strings.ReplaceAll(string(condition), "Healthy", " health unknown")) } if v1beta1conditions.IsFalse(m, condition) { - notes = append(notes, strings.Replace(string(condition), "Healthy", " not healthy", -1)) + notes = append(notes, strings.ReplaceAll(string(condition), "Healthy", " not 
healthy")) } } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 5ace88062930..0b268233e269 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -237,7 +237,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. } // Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp. - if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() { + if reterr == nil && res.IsZero() && kcp.DeletionTimestamp.IsZero() { // Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full // resync (by default 10 minutes). // The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the @@ -257,7 +257,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. } }() - if !kcp.ObjectMeta.DeletionTimestamp.IsZero() { + if !kcp.DeletionTimestamp.IsZero() { // Handle deletion reconciliation loop. return r.reconcileDelete(ctx, controlPlane) } @@ -292,12 +292,12 @@ func (r *KubeadmControlPlaneReconciler) initControlPlaneScope(ctx context.Contex // If we are not deleting the CP, adopt stand alone CP machines if any adoptableMachines := controlPlaneMachines.Filter(collections.AdoptableControlPlaneMachines(cluster.Name)) - if kcp.ObjectMeta.DeletionTimestamp.IsZero() && len(adoptableMachines) > 0 { + if kcp.DeletionTimestamp.IsZero() && len(adoptableMachines) > 0 { return nil, true, r.adoptMachines(ctx, kcp, adoptableMachines, cluster) } ownedMachines := controlPlaneMachines.Filter(collections.OwnedMachines(kcp)) - if kcp.ObjectMeta.DeletionTimestamp.IsZero() && len(ownedMachines) != len(controlPlaneMachines) { + if kcp.DeletionTimestamp.IsZero() && len(ownedMachines) != len(controlPlaneMachines) { err := errors.New("not all control plane machines are owned by this KubeadmControlPlane, refusing to operate in mixed management mode") log.Error(err, "KCP cannot reconcile") return nil, false, err diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 170ac4033e31..6352608c7118 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -412,8 +412,8 @@ func TestReconcilePaused(t *testing.T) { // Test: kcp is paused and cluster is not cluster.Spec.Paused = false - kcp.ObjectMeta.Annotations = map[string]string{} - kcp.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused" + kcp.Annotations = map[string]string{} + kcp.Annotations[clusterv1.PausedAnnotation] = "paused" _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)}) g.Expect(err).ToNot(HaveOccurred()) } diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 6fb05274754f..588a7afa24c3 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -59,7 +59,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C shouldCleanupV1Beta1 := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition) shouldCleanup := 
conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - if !(shouldCleanupV1Beta1 || shouldCleanup) { + if !shouldCleanupV1Beta1 && !shouldCleanup { continue } @@ -110,11 +110,11 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } // Returns if the machine is in the process of being deleted. - if !machineToBeRemediated.ObjectMeta.DeletionTimestamp.IsZero() { + if !machineToBeRemediated.DeletionTimestamp.IsZero() { return ctrl.Result{}, nil } - initialized := false + var initialized bool if controlPlane.KCP.Status.Initialization != nil && controlPlane.KCP.Status.Initialization.ControlPlaneInitialized { initialized = true } diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 4e3c60645a5a..2caeca446f0e 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -147,7 +147,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { var removeFinalizer = func(g *WithT, m *clusterv1.Machine) { patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) - m.ObjectMeta.Finalizers = nil + m.Finalizers = nil g.Expect(patchHelper.Patch(ctx, m)).To(Succeed()) } @@ -1355,7 +1355,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { var removeFinalizer = func(g *WithT, m *clusterv1.Machine) { patchHelper, err := patch.NewHelper(m, env.GetClient()) g.Expect(err).ToNot(HaveOccurred()) - m.ObjectMeta.Finalizers = nil + m.Finalizers = nil g.Expect(patchHelper.Patch(ctx, m)).To(Succeed()) } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index fb0434046a94..7edf7de51cff 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -808,13 +808,13 @@ func withFailureDomain(fd string) machineOpt { func withAnnotation(annotation string) machineOpt { return func(m *clusterv1.Machine) { - m.ObjectMeta.Annotations = map[string]string{annotation: ""} + m.Annotations = map[string]string{annotation: ""} } } func withLabels(labels map[string]string) machineOpt { return func(m *clusterv1.Machine) { - m.ObjectMeta.Labels = labels + m.Labels = labels } } diff --git a/controlplane/kubeadm/internal/webhooks/scale.go b/controlplane/kubeadm/internal/webhooks/scale.go index b8091cb1974d..209935b571a2 100644 --- a/controlplane/kubeadm/internal/webhooks/scale.go +++ b/controlplane/kubeadm/internal/webhooks/scale.go @@ -58,9 +58,9 @@ func (v *ScaleValidator) Handle(ctx context.Context, req admission.Request) admi } kcp := &controlplanev1.KubeadmControlPlane{} - kcpKey := types.NamespacedName{Namespace: scale.ObjectMeta.Namespace, Name: scale.ObjectMeta.Name} + kcpKey := types.NamespacedName{Namespace: scale.Namespace, Name: scale.Name} if err = v.Client.Get(ctx, kcpKey, kcp); err != nil { - return admission.Errored(http.StatusInternalServerError, errors.Wrapf(err, "failed to get KubeadmControlPlane %s/%s", scale.ObjectMeta.Namespace, scale.ObjectMeta.Name)) + return admission.Errored(http.StatusInternalServerError, errors.Wrapf(err, "failed to get KubeadmControlPlane %s/%s", scale.Namespace, scale.Name)) } if scale.Spec.Replicas == 0 { diff --git a/controlplane/kubeadm/internal/webhooks/scale_test.go 
b/controlplane/kubeadm/internal/webhooks/scale_test.go index 198085661df4..a5ced384162e 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -129,7 +129,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { } kcpExternalEtcd := kcpManagedEtcd.DeepCopy() - kcpExternalEtcd.ObjectMeta.Name = "kcp-external-etcd" + kcpExternalEtcd.Name = "kcp-external-etcd" kcpExternalEtcd.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = &bootstrapv1.ExternalEtcd{} tests := []struct { diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 71811e11d252..ce17b1995b1b 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -566,7 +566,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } // If the machine is deleting, report all the conditions as deleting - if !machine.ObjectMeta.DeletionTimestamp.IsZero() { + if !machine.DeletionTimestamp.IsZero() { for _, condition := range allMachinePodConditions { v1beta1conditions.MarkFalse(machine, condition, clusterv1.DeletingV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") } diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index 22079aa88fd4..f8a8290f36ac 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -172,9 +172,9 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - cluster, err := util.GetClusterByName(ctx, r.Client, mp.ObjectMeta.Namespace, mp.Spec.ClusterName) + cluster, err := util.GetClusterByName(ctx, r.Client, mp.Namespace, mp.Spec.ClusterName) if err != nil { - log.Error(err, "Failed to get Cluster for MachinePool.", "MachinePool", klog.KObj(mp), "Cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName)) + log.Error(err, "Failed to get Cluster for MachinePool.", "MachinePool", klog.KObj(mp), "Cluster", klog.KRef(mp.Namespace, mp.Spec.ClusterName)) return ctrl.Result{}, errors.Wrapf(err, "failed to get cluster %q for machinepool %q in namespace %q", mp.Spec.ClusterName, mp.Name, mp.Namespace) } @@ -230,7 +230,7 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) mp.Labels[clusterv1.ClusterNameLabel] = mp.Spec.ClusterName // Handle deletion reconciliation loop. - if !mp.ObjectMeta.DeletionTimestamp.IsZero() { + if !mp.DeletionTimestamp.IsZero() { return ctrl.Result{}, r.reconcileDelete(ctx, cluster, mp) } @@ -315,7 +315,7 @@ func (r *MachinePoolReconciler) reconcileDeleteNodes(ctx context.Context, cluste // isMachinePoolDeleteTimeoutPassed check the machinePool node delete time out. 
func (r *MachinePoolReconciler) isMachinePoolNodeDeleteTimeoutPassed(machinePool *expv1.MachinePool) bool { if !machinePool.DeletionTimestamp.IsZero() && machinePool.Spec.Template.Spec.NodeDeletionTimeout != nil { - if machinePool.Spec.Template.Spec.NodeDeletionTimeout.Duration.Nanoseconds() != 0 { + if machinePool.Spec.Template.Spec.NodeDeletionTimeout.Nanoseconds() != 0 { deleteTimePlusDuration := machinePool.DeletionTimestamp.Add(machinePool.Spec.Template.Spec.NodeDeletionTimeout.Duration) return deleteTimePlusDuration.Before(time.Now()) } diff --git a/exp/internal/webhooks/machinepool.go b/exp/internal/webhooks/machinepool.go index 3eede3ba5c00..9f5c7011e6ea 100644 --- a/exp/internal/webhooks/machinepool.go +++ b/exp/internal/webhooks/machinepool.go @@ -217,7 +217,7 @@ func (webhook *MachinePool) validate(oldObj, newObj *expv1.MachinePool) error { } // Validate the metadata of the MachinePool template. - allErrs = append(allErrs, newObj.Spec.Template.ObjectMeta.Validate(specPath.Child("template", "metadata"))...) + allErrs = append(allErrs, newObj.Spec.Template.Validate(specPath.Child("template", "metadata"))...) if len(allErrs) == 0 { return nil diff --git a/exp/ipam/api/v1alpha1/conversion.go b/exp/ipam/api/v1alpha1/conversion.go index b8d4afc6147f..386f307fc493 100644 --- a/exp/ipam/api/v1alpha1/conversion.go +++ b/exp/ipam/api/v1alpha1/conversion.go @@ -56,14 +56,14 @@ func (src *IPAddressClaim) ConvertTo(dstRaw conversion.Hub) error { clusterv1beta1.Convert_v1beta1_Conditions_To_v1beta2_Deprecated_V1Beta1_Conditions(&src.Status.Conditions, &dst.Status.Deprecated.V1Beta1.Conditions) } - if src.ObjectMeta.Labels != nil { - dst.Spec.ClusterName = src.ObjectMeta.Labels[clusterv1.ClusterNameLabel] - if dst.ObjectMeta.Annotations != nil { - if clusterNameLabelWasSet, ok := dst.ObjectMeta.Annotations["conversion.cluster.x-k8s.io/cluster-name-label-set"]; ok { + if src.Labels != nil { + dst.Spec.ClusterName = src.Labels[clusterv1.ClusterNameLabel] + if dst.Annotations != nil { + if clusterNameLabelWasSet, ok := dst.Annotations["conversion.cluster.x-k8s.io/cluster-name-label-set"]; ok { if clusterNameLabelWasSet == "false" { - delete(dst.ObjectMeta.Labels, clusterv1.ClusterNameLabel) + delete(dst.Labels, clusterv1.ClusterNameLabel) } - delete(dst.ObjectMeta.Annotations, "conversion.cluster.x-k8s.io/cluster-name-label-set") + delete(dst.Annotations, "conversion.cluster.x-k8s.io/cluster-name-label-set") } } } @@ -99,16 +99,16 @@ func (dst *IPAddressClaim) ConvertFrom(srcRaw conversion.Hub) error { } if src.Spec.ClusterName != "" { - if dst.ObjectMeta.Labels == nil { - dst.ObjectMeta.Labels = map[string]string{} + if dst.Labels == nil { + dst.Labels = map[string]string{} } - if _, ok := dst.ObjectMeta.Labels[clusterv1.ClusterNameLabel]; !ok { - if dst.ObjectMeta.Annotations == nil { - dst.ObjectMeta.Annotations = map[string]string{} + if _, ok := dst.Labels[clusterv1.ClusterNameLabel]; !ok { + if dst.Annotations == nil { + dst.Annotations = map[string]string{} } - dst.ObjectMeta.Annotations["conversion.cluster.x-k8s.io/cluster-name-label-set"] = "false" + dst.Annotations["conversion.cluster.x-k8s.io/cluster-name-label-set"] = "false" } - dst.ObjectMeta.Labels[clusterv1.ClusterNameLabel] = src.Spec.ClusterName + dst.Labels[clusterv1.ClusterNameLabel] = src.Spec.ClusterName } // Preserve Hub data on down-conversion except for metadata diff --git a/exp/ipam/internal/webhooks/ipaddress.go b/exp/ipam/internal/webhooks/ipaddress.go index b429c8b08632..131de59830dd 100644 --- 
a/exp/ipam/internal/webhooks/ipaddress.go +++ b/exp/ipam/internal/webhooks/ipaddress.go @@ -150,9 +150,9 @@ func (webhook *IPAddress) validate(ctx context.Context, ip *ipamv1.IPAddress) er } claim := &ipamv1.IPAddressClaim{} - err = webhook.Client.Get(ctx, types.NamespacedName{Name: ip.Spec.ClaimRef.Name, Namespace: ip.ObjectMeta.Namespace}, claim) + err = webhook.Client.Get(ctx, types.NamespacedName{Name: ip.Spec.ClaimRef.Name, Namespace: ip.Namespace}, claim) if err != nil && !apierrors.IsNotFound(err) { - log.Error(err, "Failed to fetch claim", "IPAddressClaim", klog.KRef(ip.ObjectMeta.Namespace, ip.Spec.ClaimRef.Name)) + log.Error(err, "Failed to fetch claim", "IPAddressClaim", klog.KRef(ip.Namespace, ip.Spec.ClaimRef.Name)) allErrs = append(allErrs, field.InternalError( specPath.Child("claimRef"), @@ -162,10 +162,10 @@ func (webhook *IPAddress) validate(ctx context.Context, ip *ipamv1.IPAddress) er } if claim.Name != "" && // only report non-matching pool if the claim exists - !(ip.Spec.PoolRef.APIGroup != nil && claim.Spec.PoolRef.APIGroup != nil && - *ip.Spec.PoolRef.APIGroup == *claim.Spec.PoolRef.APIGroup && - ip.Spec.PoolRef.Kind == claim.Spec.PoolRef.Kind && - ip.Spec.PoolRef.Name == claim.Spec.PoolRef.Name) { + (ip.Spec.PoolRef.APIGroup == nil || claim.Spec.PoolRef.APIGroup == nil || + *ip.Spec.PoolRef.APIGroup != *claim.Spec.PoolRef.APIGroup || + ip.Spec.PoolRef.Kind != claim.Spec.PoolRef.Kind || + ip.Spec.PoolRef.Name != claim.Spec.PoolRef.Name) { allErrs = append(allErrs, field.Invalid( specPath.Child("poolRef"), diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go index b0dde0b0dd75..f243ceba1178 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller.go @@ -140,7 +140,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu } // Handle deletion reconciliation loop. - if !extensionConfig.ObjectMeta.DeletionTimestamp.IsZero() { + if !extensionConfig.DeletionTimestamp.IsZero() { return r.reconcileDelete(ctx, extensionConfig) } diff --git a/exp/runtime/server/server.go b/exp/runtime/server/server.go index 540636bc04d3..d9af20b7e680 100644 --- a/exp/runtime/server/server.go +++ b/exp/runtime/server/server.go @@ -250,7 +250,7 @@ func (s *Server) Start(ctx context.Context) error { handler := h wrappedHandler := s.wrapHandler(handler) - s.Server.Register(handlerPath, http.HandlerFunc(wrappedHandler)) + s.Register(handlerPath, http.HandlerFunc(wrappedHandler)) } return s.Server.Start(ctx) diff --git a/exp/topology/scope/blueprint.go b/exp/topology/scope/blueprint.go index 2552f863dea0..502848bd44da 100644 --- a/exp/topology/scope/blueprint.go +++ b/exp/topology/scope/blueprint.go @@ -104,7 +104,7 @@ func (b *ClusterBlueprint) IsControlPlaneMachineHealthCheckEnabled() bool { } // If no MachineHealthCheck is defined in the ClusterClass or in the Cluster Topology then return false. if b.ClusterClass.Spec.ControlPlane.MachineHealthCheck == nil && - (b.Topology.ControlPlane.MachineHealthCheck == nil || b.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero()) { + (b.Topology.ControlPlane.MachineHealthCheck == nil || b.Topology.ControlPlane.MachineHealthCheck.IsZero()) { return false } // If `enable` is not set then consider it as true. A MachineHealthCheck will be created from either ClusterClass or Cluster Topology. 
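The boolean rewrites in upgrader.go, remediation.go, machineset_controller.go and the ipaddress webhook above are the same mechanical transformation: a negated compound condition is expanded with De Morgan's laws (`!(a || b)` becomes `!a && !b`, `!(a && b)` becomes `!a || !b`) so each operand carries its own negation and the outer `!(...)` wrapper disappears. A hedged sketch with placeholder names, mirroring the shape of the remediation hunk:

```go
package example

// shouldSkip illustrates the rewrite; both forms are logically equivalent,
// and the second is the one the patch uses.
func shouldSkip(cleanupV1Beta1, cleanup bool) bool {
	// Before: negate the whole disjunction.
	//   return !(cleanupV1Beta1 || cleanup)
	// After: push the negation onto each operand (De Morgan).
	return !cleanupV1Beta1 && !cleanup
}
```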
@@ -117,7 +117,7 @@ func (b *ClusterBlueprint) IsControlPlaneMachineHealthCheckEnabled() bool { // ControlPlaneMachineHealthCheckClass returns the MachineHealthCheckClass that should be used to create the MachineHealthCheck object. func (b *ClusterBlueprint) ControlPlaneMachineHealthCheckClass() *clusterv1.MachineHealthCheckClass { - if b.Topology.ControlPlane.MachineHealthCheck != nil && !b.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + if b.Topology.ControlPlane.MachineHealthCheck != nil && !b.Topology.ControlPlane.MachineHealthCheck.IsZero() { return &b.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass } return b.ControlPlane.MachineHealthCheck @@ -132,7 +132,7 @@ func (b *ClusterBlueprint) HasControlPlaneMachineHealthCheck() bool { // Returns false otherwise. func (b *ClusterBlueprint) IsMachineDeploymentMachineHealthCheckEnabled(md *clusterv1.MachineDeploymentTopology) bool { // If no MachineHealthCheck is defined in the ClusterClass or in the Cluster Topology then return false. - if b.MachineDeployments[md.Class].MachineHealthCheck == nil && (md.MachineHealthCheck == nil || md.MachineHealthCheck.MachineHealthCheckClass.IsZero()) { + if b.MachineDeployments[md.Class].MachineHealthCheck == nil && (md.MachineHealthCheck == nil || md.MachineHealthCheck.IsZero()) { return false } // If `enable` is not set then consider it as true. A MachineHealthCheck will be created from either ClusterClass or Cluster Topology. @@ -145,7 +145,7 @@ func (b *ClusterBlueprint) IsMachineDeploymentMachineHealthCheckEnabled(md *clus // MachineDeploymentMachineHealthCheckClass return the MachineHealthCheckClass that should be used to create the MachineHealthCheck object. func (b *ClusterBlueprint) MachineDeploymentMachineHealthCheckClass(md *clusterv1.MachineDeploymentTopology) *clusterv1.MachineHealthCheckClass { - if md.MachineHealthCheck != nil && !md.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + if md.MachineHealthCheck != nil && !md.MachineHealthCheck.IsZero() { return &md.MachineHealthCheck.MachineHealthCheckClass } return b.MachineDeployments[md.Class].MachineHealthCheck diff --git a/hack/tools/.custom-gcl.yaml b/hack/tools/.custom-gcl.yaml index 9cd152ec3d75..f0f612ddcdd9 100644 --- a/hack/tools/.custom-gcl.yaml +++ b/hack/tools/.custom-gcl.yaml @@ -1,4 +1,4 @@ -version: v1.63.4 +version: v2.0.2 name: golangci-lint-kube-api-linter destination: ./bin plugins: diff --git a/hack/tools/conversion-verifier/main.go b/hack/tools/conversion-verifier/main.go index ab1109014d3f..95e34bf48093 100644 --- a/hack/tools/conversion-verifier/main.go +++ b/hack/tools/conversion-verifier/main.go @@ -77,13 +77,13 @@ func main() { Registry: &markers.Registry{}, } // Register the markers. 
- if err := col.Registry.Register(groupNameMarker); err != nil { + if err := col.Register(groupNameMarker); err != nil { klog.Fatal(err) } - if err := col.Registry.Register(storageVersionMarker); err != nil { + if err := col.Register(storageVersionMarker); err != nil { klog.Fatal(err) } - if err := col.Registry.Register(nolintConversionMarker); err != nil { + if err := col.Register(nolintConversionMarker); err != nil { klog.Fatal(err) } diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 419265b981b3..99fdfeeb31e7 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -231,7 +231,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct } // Handle deletion reconciliation loop. - if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { + if !cluster.DeletionTimestamp.IsZero() { reconcileDelete := append( alwaysReconcile, r.reconcileDelete, diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 7cfcd15265c5..05636b33552d 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retres ct return ctrl.Result{}, err } - if !clusterClass.ObjectMeta.DeletionTimestamp.IsZero() { + if !clusterClass.DeletionTimestamp.IsZero() { return ctrl.Result{}, nil } @@ -423,7 +423,7 @@ func addDefinitionToExistingStatusVariable(variable clusterv1.ClusterClassVariab // If definitions already conflict, no need to check. if !combinedVariable.DefinitionsConflict { currentDefinition := combinedVariable.Definitions[0] - if !(currentDefinition.Required == newVariableDefinition.Required && reflect.DeepEqual(currentDefinition.Schema, newVariableDefinition.Schema) && reflect.DeepEqual(currentDefinition.Metadata, newVariableDefinition.Metadata)) { + if currentDefinition.Required != newVariableDefinition.Required || !reflect.DeepEqual(currentDefinition.Schema, newVariableDefinition.Schema) || !reflect.DeepEqual(currentDefinition.Metadata, newVariableDefinition.Metadata) { combinedVariable.DefinitionsConflict = true } } diff --git a/internal/controllers/clusterresourceset/clusterresourceset_controller.go b/internal/controllers/clusterresourceset/clusterresourceset_controller.go index 8cfa04fab9ad..43825613a601 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_controller.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_controller.go @@ -183,7 +183,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re } // Handle deletion reconciliation loop. 
- if !clusterResourceSet.ObjectMeta.DeletionTimestamp.IsZero() { + if !clusterResourceSet.DeletionTimestamp.IsZero() { return ctrl.Result{}, r.reconcileDelete(ctx, clusters, clusterResourceSet) } diff --git a/internal/controllers/clusterresourceset/clusterresourceset_helpers.go b/internal/controllers/clusterresourceset/clusterresourceset_helpers.go index e8d6756da009..b66b93ea64be 100644 --- a/internal/controllers/clusterresourceset/clusterresourceset_helpers.go +++ b/internal/controllers/clusterresourceset/clusterresourceset_helpers.go @@ -144,13 +144,9 @@ func (r *Reconciler) getOrCreateClusterResourceSetBinding(ctx context.Context, c } // getConfigMap retrieves any ConfigMap from the given name and namespace. -func getConfigMap(ctx context.Context, c client.Client, configmapName types.NamespacedName) (*corev1.ConfigMap, error) { +func getConfigMap(ctx context.Context, c client.Client, configMapName types.NamespacedName) (*corev1.ConfigMap, error) { configMap := &corev1.ConfigMap{} - configMapKey := client.ObjectKey{ - Namespace: configmapName.Namespace, - Name: configmapName.Name, - } - if err := c.Get(ctx, configMapKey, configMap); err != nil { + if err := c.Get(ctx, configMapName, configMap); err != nil { return nil, err } @@ -160,11 +156,7 @@ func getConfigMap(ctx context.Context, c client.Client, configmapName types.Name // getSecret retrieves any Secret from the given secret name and namespace. func getSecret(ctx context.Context, c client.Client, secretName types.NamespacedName) (*corev1.Secret, error) { secret := &corev1.Secret{} - secretKey := client.ObjectKey{ - Namespace: secretName.Namespace, - Name: secretName.Name, - } - if err := c.Get(ctx, secretKey, secret); err != nil { + if err := c.Get(ctx, secretName, secret); err != nil { return nil, err } diff --git a/internal/controllers/machine/drain/drain.go b/internal/controllers/machine/drain/drain.go index 1236eceacc12..29d1d7820a60 100644 --- a/internal/controllers/machine/drain/drain.go +++ b/internal/controllers/machine/drain/drain.go @@ -500,7 +500,7 @@ func (r EvictionResult) ConditionMessage(nodeDrainStartTime *metav1.Time) string } // Note: the code computing stale warning for the machine deleting condition is making assumptions on the format/content of this message. // Same applies for other conditions where deleting is involved, e.g. MachineSet's Deleting and ScalingDown condition. 
- failureMessage = strings.Replace(failureMessage, "Cannot evict pod as it would violate the pod's disruption budget.", "cannot evict pod as it would violate the pod's disruption budget.", -1) + failureMessage = strings.ReplaceAll(failureMessage, "Cannot evict pod as it would violate the pod's disruption budget.", "cannot evict pod as it would violate the pod's disruption budget.") if !strings.HasPrefix(failureMessage, "cannot evict pod as it would violate the pod's disruption budget.") { failureMessage = "failed to evict Pod, " + failureMessage } diff --git a/internal/controllers/machine/drain/filters.go b/internal/controllers/machine/drain/filters.go index d56c958ccd44..fc90618c53ec 100644 --- a/internal/controllers/machine/drain/filters.go +++ b/internal/controllers/machine/drain/filters.go @@ -232,7 +232,7 @@ func (d *Helper) daemonSetFilter(ctx context.Context, pod *corev1.Pod) PodDelete } func (d *Helper) mirrorPodFilter(ctx context.Context, pod *corev1.Pod) PodDeleteStatus { - if _, found := pod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found { + if _, found := pod.Annotations[corev1.MirrorPodAnnotationKey]; found { log := ctrl.LoggerFrom(ctx, "Pod", klog.KObj(pod)) log.V(4).Info("Skip evicting static Pod") return MakePodDeleteStatusSkip() @@ -268,7 +268,7 @@ func (d *Helper) unreplicatedFilter(_ context.Context, pod *corev1.Pod) PodDelet func shouldSkipPod(pod *corev1.Pod, skipDeletedTimeoutSeconds int) bool { return skipDeletedTimeoutSeconds > 0 && - !pod.ObjectMeta.DeletionTimestamp.IsZero() && + !pod.DeletionTimestamp.IsZero() && int(time.Since(pod.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > skipDeletedTimeoutSeconds } @@ -283,7 +283,7 @@ func (d *Helper) skipDeletedFilter(ctx context.Context, pod *corev1.Pod) PodDele func (d *Helper) drainLabelFilter(ctx context.Context, pod *corev1.Pod) PodDeleteStatus { log := ctrl.LoggerFrom(ctx, "Pod", klog.KObj(pod)) - if labelValue, found := pod.ObjectMeta.Labels[clusterv1.PodDrainLabel]; found { + if labelValue, found := pod.Labels[clusterv1.PodDrainLabel]; found { switch { case strings.EqualFold(labelValue, string(clusterv1.MachineDrainRuleDrainBehaviorSkip)): log.V(4).Info(fmt.Sprintf("Skip evicting Pod, because Pod has %s label with %s value", clusterv1.PodDrainLabel, labelValue)) diff --git a/internal/controllers/machine/drain/filters_test.go b/internal/controllers/machine/drain/filters_test.go index c6a6416cffbc..bb79ab1c2798 100644 --- a/internal/controllers/machine/drain/filters_test.go +++ b/internal/controllers/machine/drain/filters_test.go @@ -64,7 +64,7 @@ func TestSkipDeletedFilter(t *testing.T) { if tc.timeStampAgeSeconds > 0 { dTime := &metav1.Time{Time: time.Now().Add(time.Duration(tc.timeStampAgeSeconds) * time.Second * -1)} - pod.ObjectMeta.SetDeletionTimestamp(dTime) + pod.SetDeletionTimestamp(dTime) } podDeleteStatus := h.skipDeletedFilter(context.Background(), &pod) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 4456c45dad23..e9c97da8c904 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -216,7 +216,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - cluster, err := util.GetClusterByName(ctx, r.Client, m.ObjectMeta.Namespace, m.Spec.ClusterName) + cluster, err := util.GetClusterByName(ctx, r.Client, m.Namespace, m.Spec.ClusterName) if err != nil { return ctrl.Result{}, errors.Wrapf(err, 
"failed to get cluster %q for machine %q in namespace %q", m.Spec.ClusterName, m.Name, m.Namespace) @@ -232,7 +232,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - if !m.ObjectMeta.DeletionTimestamp.IsZero() { + if !m.DeletionTimestamp.IsZero() { // Check reconcileDeleteCache to ensure we won't run reconcileDelete too frequently. // Note: The reconcileDelete func will add entries to the cache. if cacheEntry, ok := r.reconcileDeleteCache.Has(cache.NewReconcileEntryKey(m)); ok { @@ -270,7 +270,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re } // Handle deletion reconciliation loop. - if !m.ObjectMeta.DeletionTimestamp.IsZero() { + if !m.DeletionTimestamp.IsZero() { reconcileDelete := append( alwaysReconcile, r.reconcileDelete, @@ -298,7 +298,7 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.DrainingSucceededV1Beta1Condition, ), - v1beta1conditions.WithStepCounterIf(machine.ObjectMeta.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIf(machine.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil), v1beta1conditions.WithStepCounterIfOnly( clusterv1.BootstrapReadyV1Beta1Condition, clusterv1.InfrastructureReadyV1Beta1Condition, @@ -455,9 +455,9 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result if isDeleteNodeAllowed { // pre-drain.delete lifecycle hook // Return early without error, will requeue if/when the hook owner removes the annotation. - if annotations.HasWithPrefix(clusterv1.PreDrainDeleteHookAnnotationPrefix, m.ObjectMeta.Annotations) { + if annotations.HasWithPrefix(clusterv1.PreDrainDeleteHookAnnotationPrefix, m.Annotations) { var hooks []string - for key := range m.ObjectMeta.Annotations { + for key := range m.Annotations { if strings.HasPrefix(key, clusterv1.PreDrainDeleteHookAnnotationPrefix) { hooks = append(hooks, key) } @@ -554,9 +554,9 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result // pre-term.delete lifecycle hook // Return early without error, will requeue if/when the hook owner removes the annotation. 
- if annotations.HasWithPrefix(clusterv1.PreTerminateDeleteHookAnnotationPrefix, m.ObjectMeta.Annotations) { + if annotations.HasWithPrefix(clusterv1.PreTerminateDeleteHookAnnotationPrefix, m.Annotations) { var hooks []string - for key := range m.ObjectMeta.Annotations { + for key := range m.Annotations { if strings.HasPrefix(key, clusterv1.PreTerminateDeleteHookAnnotationPrefix) { hooks = append(hooks, key) } @@ -644,7 +644,7 @@ func (r *Reconciler) isNodeDrainAllowed(m *clusterv1.Machine) bool { } } - if _, exists := m.ObjectMeta.Annotations[clusterv1.ExcludeNodeDrainingAnnotation]; exists { + if _, exists := m.Annotations[clusterv1.ExcludeNodeDrainingAnnotation]; exists { return false } @@ -664,7 +664,7 @@ func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine) bool { } } - if _, exists := m.ObjectMeta.Annotations[clusterv1.ExcludeWaitForNodeVolumeDetachAnnotation]; exists { + if _, exists := m.Annotations[clusterv1.ExcludeWaitForNodeVolumeDetachAnnotation]; exists { return false } diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 0835d41252f3..b15928806031 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -386,7 +386,7 @@ func shouldNodeHaveOutdatedTaint(ctx context.Context, c client.Client, m *cluste } md := &clusterv1.MachineDeployment{} objKey = &client.ObjectKey{ - Namespace: m.ObjectMeta.Namespace, + Namespace: m.Namespace, Name: m.Labels[clusterv1.MachineDeploymentNameLabel], } if err := c.Get(ctx, *objKey, md); err != nil { diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 129270a85723..d0314357ac82 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -322,7 +322,7 @@ func TestGetNode(t *testing.T) { }(nodesToCleanup...) 
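The many `m.ObjectMeta.X` → `m.X`, `now.Time.Add(...)` → `now.Add(...)`, and `env.Manager.GetClient()` → `env.GetClient()` edits in these hunks all rely on Go's promotion of fields and methods from embedded structs: naming the embedded field in the selector is redundant, which is the simplification suggested by checks such as staticcheck's QF1008. A small self-contained sketch; the `Machine` type here is a trimmed-down stand-in, not the real clusterv1.Machine:

```go
package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Machine is a stand-in that, like the real API types, embeds metav1.ObjectMeta.
type Machine struct {
	metav1.ObjectMeta
}

func selectorExamples() {
	m := Machine{}

	// Fields of the embedded ObjectMeta are promoted, so both selectors
	// refer to the same field; the patch drops the redundant ObjectMeta.
	_ = m.ObjectMeta.Annotations
	_ = m.Annotations

	// metav1.Time embeds time.Time, so time.Time methods are promoted too.
	now := metav1.Now()
	_ = now.Time.Add(-1 * time.Hour)
	_ = now.Add(-1 * time.Hour)
}
```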
clusterCache, err := clustercache.SetupWithManager(ctx, env.Manager, clustercache.Options{ - SecretClient: env.Manager.GetClient(), + SecretClient: env.GetClient(), Cache: clustercache.CacheOptions{ Indexes: []clustercache.CacheOptionsIndex{clustercache.NodeProviderIDIndex}, }, diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 89a7e629cca7..563b3e3cb0cf 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -2116,7 +2116,7 @@ func TestShouldWaitForNodeVolumes(t *testing.T) { attachedVolumes := []corev1.AttachedVolume{ { - Name: corev1.UniqueVolumeName(fmt.Sprintf("kubernetes.io/csi/%s^%s", persistentVolume.Spec.PersistentVolumeSource.CSI.Driver, persistentVolume.Spec.PersistentVolumeSource.CSI.VolumeHandle)), + Name: corev1.UniqueVolumeName(fmt.Sprintf("kubernetes.io/csi/%s^%s", persistentVolume.Spec.CSI.Driver, persistentVolume.Spec.CSI.VolumeHandle)), DevicePath: "test-path", }, } diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 7aa88f4968d1..a3bf721f6002 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -462,7 +462,7 @@ func (r *Reconciler) MachineSetToDeployments(ctx context.Context, o client.Objec // Check if the controller reference is already set and // return an empty result when one is found. - for _, ref := range ms.ObjectMeta.GetOwnerReferences() { + for _, ref := range ms.GetOwnerReferences() { if ref.Controller != nil && *ref.Controller { return result } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index c6bebe9d12f7..8c79880feec7 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -631,7 +631,7 @@ func (r *Reconciler) cleanupDeployment(ctx context.Context, oldMSs []*clusterv1. 
// Avoid deleting machine set with deletion timestamp set aliveFilter := func(ms *clusterv1.MachineSet) bool { - return ms != nil && ms.ObjectMeta.DeletionTimestamp.IsZero() + return ms != nil && ms.DeletionTimestamp.IsZero() } cleanableMSes := mdutil.FilterMachineSets(oldMSs, aliveFilter) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index a6b497c4c376..cba5ed1c429d 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -2618,7 +2618,7 @@ func createMachinesWithNodes( machine.Finalizers = o.finalizers } if o.annotations != nil { - machine.ObjectMeta.Annotations = o.annotations + machine.Annotations = o.annotations } g.Expect(env.Create(ctx, machine)).To(Succeed()) fmt.Printf("machine created: %s\n", machine.GetName()) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index 75b3521ee549..14d62d3d60d7 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -162,13 +162,13 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi "controlPlaneInitializedTime", controlPlaneInitialized, "machineInfraReadyTime", machineInfraReady, ) - if v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) && controlPlaneInitialized != nil && controlPlaneInitialized.After(comparisonTime) { comparisonTime = controlPlaneInitialized.Time } - if v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) && clusterInfraReady != nil && clusterInfraReady.After(comparisonTime) { comparisonTime = clusterInfraReady.Time } - if v1beta1conditions.IsTrue(t.Machine, clusterv1.InfrastructureReadyV1Beta1Condition) && machineInfraReady != nil && machineInfraReady.Time.After(comparisonTime) { + if v1beta1conditions.IsTrue(t.Machine, clusterv1.InfrastructureReadyV1Beta1Condition) && machineInfraReady != nil && machineInfraReady.After(comparisonTime) { comparisonTime = machineInfraReady.Time } logger.V(5).Info("Using comparison time", "time", comparisonTime) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index 771de5a90ec1..003f153a683a 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -261,7 +261,7 @@ func TestHealthCheckTargets(t *testing.T) { // Targets for when the node has not yet been seen by the Machine controller testMachineCreated1200s := testMachine.DeepCopy() nowMinus1200s := metav1.NewTime(time.Now().Add(-1200 * time.Second)) - testMachineCreated1200s.ObjectMeta.CreationTimestamp = nowMinus1200s + testMachineCreated1200s.CreationTimestamp = nowMinus1200s nodeNotYetStartedTarget1200s := healthCheckTarget{ 
Cluster: cluster, @@ -274,7 +274,7 @@ func TestHealthCheckTargets(t *testing.T) { testMachineCreated400s := testMachine.DeepCopy() nowMinus400s := metav1.NewTime(time.Now().Add(-400 * time.Second)) - testMachineCreated400s.ObjectMeta.CreationTimestamp = nowMinus400s + testMachineCreated400s.CreationTimestamp = nowMinus400s nodeNotYetStartedTarget400s := healthCheckTarget{ Cluster: cluster, diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index b75ea4dae268..d08c3380a961 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -184,7 +184,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retres ct return ctrl.Result{}, err } - cluster, err := util.GetClusterByName(ctx, r.Client, machineSet.ObjectMeta.Namespace, machineSet.Spec.ClusterName) + cluster, err := util.GetClusterByName(ctx, r.Client, machineSet.Namespace, machineSet.Spec.ClusterName) if err != nil { return ctrl.Result{}, err } @@ -1072,7 +1072,7 @@ func (r *Reconciler) MachineToMachineSets(ctx context.Context, o client.Object) // Check if the controller reference is already set and // return an empty result when one is found. - for _, ref := range m.ObjectMeta.GetOwnerReferences() { + for _, ref := range m.GetOwnerReferences() { if ref.Controller != nil && *ref.Controller { return result } @@ -1341,7 +1341,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( shouldCleanupV1Beta1 := v1beta1conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) && v1beta1conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition) shouldCleanup := conditions.IsTrue(m, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) - if !(shouldCleanupV1Beta1 || shouldCleanup) { + if !shouldCleanupV1Beta1 && !shouldCleanup { continue } diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 62418d04d061..1634eab4b2d7 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -948,7 +948,7 @@ func TestMachineSetReconcile_MachinesCreatedConditionFalseOnBadInfraRef(t *testi }, }, Spec: clusterv1.MachineSetSpec{ - ClusterName: cluster.ObjectMeta.Name, + ClusterName: cluster.Name, Replicas: &replicas, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ diff --git a/internal/controllers/machineset/machineset_delete_policy.go b/internal/controllers/machineset/machineset_delete_policy.go index 445d06ef2702..5cdf994b8f7e 100644 --- a/internal/controllers/machineset/machineset_delete_policy.go +++ b/internal/controllers/machineset/machineset_delete_policy.go @@ -48,16 +48,16 @@ func oldestDeletePriority(machine *clusterv1.Machine) deletePriority { if !machine.DeletionTimestamp.IsZero() { return mustDelete } - if _, ok := machine.ObjectMeta.Annotations[clusterv1.DeleteMachineAnnotation]; ok { + if _, ok := machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok { return shouldDelete } if !isMachineHealthy(machine) { return betterDelete } - if machine.ObjectMeta.CreationTimestamp.Time.IsZero() { + if machine.CreationTimestamp.Time.IsZero() { return mustNotDelete } - d := metav1.Now().Sub(machine.ObjectMeta.CreationTimestamp.Time) + d := 
metav1.Now().Sub(machine.CreationTimestamp.Time) if d.Seconds() < 0 { return mustNotDelete } @@ -68,7 +68,7 @@ func newestDeletePriority(machine *clusterv1.Machine) deletePriority { if !machine.DeletionTimestamp.IsZero() { return mustDelete } - if _, ok := machine.ObjectMeta.Annotations[clusterv1.DeleteMachineAnnotation]; ok { + if _, ok := machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok { return shouldDelete } if !isMachineHealthy(machine) { @@ -81,7 +81,7 @@ func randomDeletePolicy(machine *clusterv1.Machine) deletePriority { if !machine.DeletionTimestamp.IsZero() { return mustDelete } - if _, ok := machine.ObjectMeta.Annotations[clusterv1.DeleteMachineAnnotation]; ok { + if _, ok := machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok { return shouldDelete } if !isMachineHealthy(machine) { diff --git a/internal/controllers/machineset/machineset_delete_policy_test.go b/internal/controllers/machineset/machineset_delete_policy_test.go index 1286a0cc3ab6..525cda51a24a 100644 --- a/internal/controllers/machineset/machineset_delete_policy_test.go +++ b/internal/controllers/machineset/machineset_delete_policy_test.go @@ -312,27 +312,27 @@ func TestMachineNewestDelete(t *testing.T) { Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } newest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -1))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } secondNewest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -5))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } secondOldest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } oldest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } deleteMachineWithMachineAnnotation := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{clusterv1.DeleteMachineAnnotation: ""}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{clusterv1.DeleteMachineAnnotation: ""}, CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } unhealthyMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ @@ -343,10 +343,10 @@ func TestMachineNewestDelete(t *testing.T) { }, } deleteMachineWithoutNodeRef := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: 
metav1.NewTime(currentTime.AddDate(0, 0, -1))}, } nodeHealthyConditionFalseMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, Deprecated: &clusterv1.MachineDeprecatedStatus{ @@ -362,7 +362,7 @@ func TestMachineNewestDelete(t *testing.T) { }, } nodeHealthyConditionUnknownMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, Deprecated: &clusterv1.MachineDeprecatedStatus{ @@ -469,27 +469,27 @@ func TestMachineOldestDelete(t *testing.T) { Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } newest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -1))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } secondNewest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -5))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } secondOldest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } oldest := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } deleteMachineWithMachineAnnotation := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{clusterv1.DeleteMachineAnnotation: ""}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{clusterv1.DeleteMachineAnnotation: ""}, CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } unhealthyMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ @@ -504,7 +504,7 @@ func TestMachineOldestDelete(t *testing.T) { Status: clusterv1.MachineStatus{NodeRef: nodeRef}, } unhealthyMachineA := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{Name: "a", CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{Name: "a", CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ @@ -515,7 +515,7 @@ func TestMachineOldestDelete(t *testing.T) { }, } 
unhealthyMachineZ := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{Name: "z", CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{Name: "z", CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ Deprecated: &clusterv1.MachineDeprecatedStatus{ V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ @@ -526,10 +526,10 @@ func TestMachineOldestDelete(t *testing.T) { }, } deleteMachineWithoutNodeRef := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, } nodeHealthyConditionFalseMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, Deprecated: &clusterv1.MachineDeprecatedStatus{ @@ -545,7 +545,7 @@ func TestMachineOldestDelete(t *testing.T) { }, } nodeHealthyConditionUnknownMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, + ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, Deprecated: &clusterv1.MachineDeprecatedStatus{ diff --git a/internal/controllers/machineset/machineset_preflight.go b/internal/controllers/machineset/machineset_preflight.go index de8296ca2c67..81986b33f76e 100644 --- a/internal/controllers/machineset/machineset_preflight.go +++ b/internal/controllers/machineset/machineset_preflight.go @@ -151,7 +151,7 @@ func (r *Reconciler) runPreflightChecks(ctx context.Context, cluster *clusterv1. func shouldRun(preflightChecks, skippedPreflightChecks sets.Set[clusterv1.MachineSetPreflightCheck], preflightCheck clusterv1.MachineSetPreflightCheck) bool { return (preflightChecks.Has(clusterv1.MachineSetPreflightCheckAll) || preflightChecks.Has(preflightCheck)) && - !(skippedPreflightChecks.Has(clusterv1.MachineSetPreflightCheckAll) || skippedPreflightChecks.Has(preflightCheck)) + (!skippedPreflightChecks.Has(clusterv1.MachineSetPreflightCheckAll) && !skippedPreflightChecks.Has(preflightCheck)) } func (r *Reconciler) controlPlaneStablePreflightCheck(controlPlane *unstructured.Unstructured, cluster *clusterv1.Cluster, controlPlaneVersion string) (preflightCheckErrorMessage, error) { diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index ef427728d058..d6709b196898 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -170,8 +170,8 @@ func clusterChangeIsRelevant(scheme *runtime.Scheme, logger logr.Logger) predica dropNotRelevant := func(cluster *clusterv1.Cluster) *clusterv1.Cluster { c := cluster.DeepCopy() // Drop metadata fields which are impacted by not relevant changes. 
- c.ObjectMeta.ManagedFields = nil - c.ObjectMeta.ResourceVersion = "" + c.ManagedFields = nil + c.ResourceVersion = "" return c } @@ -218,8 +218,8 @@ func machineDeploymentChangeIsRelevant(scheme *runtime.Scheme, logger logr.Logge dropNotRelevant := func(machineDeployment *clusterv1.MachineDeployment) *clusterv1.MachineDeployment { md := machineDeployment.DeepCopy() // Drop metadata fields which are impacted by not relevant changes. - md.ObjectMeta.ManagedFields = nil - md.ObjectMeta.ResourceVersion = "" + md.ManagedFields = nil + md.ResourceVersion = "" return md } @@ -326,7 +326,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re // In case the object is deleted, the managed topology stops to reconcile; // (the other controllers will take care of deletion). - if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { + if !cluster.DeletionTimestamp.IsZero() { return r.reconcileDelete(ctx, cluster) } diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 18ad96494558..6b78892c6524 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -877,8 +877,8 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) cluster1Secret := kubeconfig.GenerateSecret(cluster1, kubeconfig.FromEnvTestConfig(env.Config, cluster1)) cluster2Secret := kubeconfig.GenerateSecret(cluster2, kubeconfig.FromEnvTestConfig(env.Config, cluster2)) // Unset the ownerrefs otherwise they are invalid because they contain an empty uid. - cluster1Secret.ObjectMeta.OwnerReferences = nil - cluster2Secret.ObjectMeta.OwnerReferences = nil + cluster1Secret.OwnerReferences = nil + cluster2Secret.OwnerReferences = nil // Create a set of setupTestEnvForIntegrationTests from the objects above to add to the API server when the test environment starts. // The objects are created for every test, though some e.g. infrastructureMachineTemplate2 may not be used in every test. diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go index 2fdefe708c15..304e0be5d9bd 100644 --- a/internal/controllers/topology/cluster/conditions.go +++ b/internal/controllers/topology/cluster/conditions.go @@ -74,7 +74,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste } // Mark TopologyReconciled as false due to cluster deletion. - if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { + if !cluster.DeletionTimestamp.IsZero() { v1beta1conditions.Set(cluster, v1beta1conditions.FalseCondition( clusterv1.TopologyReconciledV1Beta1Condition, diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go index 68e7ec116c90..fbb024537ce5 100644 --- a/internal/controllers/topology/cluster/current_state.go +++ b/internal/controllers/topology/cluster/current_state.go @@ -192,7 +192,7 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, bluep // Retrieve the name which is assigned in Cluster's topology // from a well-defined label. 
- mdTopologyName, ok := m.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] + mdTopologyName, ok := m.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] if !ok || mdTopologyName == "" { return nil, fmt.Errorf("failed to find label %s in MachineDeployment %s", clusterv1.ClusterTopologyMachineDeploymentNameLabel, klog.KObj(m)) } @@ -317,7 +317,7 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa // Retrieve the name which is assigned in Cluster's topology // from a well-defined label. - mpTopologyName, ok := m.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] + mpTopologyName, ok := m.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] if !ok || mpTopologyName == "" { return nil, fmt.Errorf("failed to find label %s in MachinePool %s", clusterv1.ClusterTopologyMachinePoolNameLabel, klog.KObj(m)) } diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index f41ece4fafdc..8a0870a878a2 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -192,7 +192,8 @@ func addVariablesForPatch(blueprint *scope.ClusterBlueprint, desired *scope.Clus } // If the item holder reference is a MachineDeployment calculate the variables for each MachineDeploymentTopology // and add them to the variables for the MachineDeployment. - if item.HolderReference.Kind == "MachineDeployment" { + switch item.HolderReference.Kind { + case "MachineDeployment": md, ok := mdStateIndex[item.HolderReference.Name] if !ok { return errors.Errorf("could not find desired state for MachineDeployment %s", klog.KRef(item.HolderReference.Namespace, item.HolderReference.Name)) @@ -208,7 +209,7 @@ func addVariablesForPatch(blueprint *scope.ClusterBlueprint, desired *scope.Clus return errors.Wrapf(err, "failed to calculate variables for %s", klog.KObj(md.Object)) } item.Variables = mdVariables - } else if item.HolderReference.Kind == "MachinePool" { + case "MachinePool": mp, ok := mpStateIndex[item.HolderReference.Name] if !ok { return errors.Errorf("could not find desired state for MachinePool %s", klog.KRef(item.HolderReference.Namespace, item.HolderReference.Name)) diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index fda8a7ca6155..9176e9e3dae3 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -560,7 +560,7 @@ func (r *Reconciler) getCurrentMachineDeployments(ctx context.Context, s *scope. 
currentMDs := sets.Set[string]{} for _, md := range mdList.Items { - mdTopologyName, ok := md.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] + mdTopologyName, ok := md.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] if ok || mdTopologyName != "" { currentMDs.Insert(mdTopologyName) } @@ -904,7 +904,7 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope) currentMPs := sets.Set[string]{} for _, mp := range mpList.Items { - mpTopologyName, ok := mp.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] + mpTopologyName, ok := mp.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] if ok || mpTopologyName != "" { currentMPs.Insert(mpTopologyName) } diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index 4e8e558d5ec2..6e95dc4944bb 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -2028,10 +2028,10 @@ func TestReconcileMachineDeployments(t *testing.T) { infrastructureMachineTemplate9m := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-9m").Build() bootstrapTemplate9m := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-9m").Build() md9 := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil) - md9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} + md9.Object.Spec.Template.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} md9.Object.Spec.Selector.MatchLabels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} md9WithInstanceSpecificTemplateMetadataAndSelector := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil) - md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"} + md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Template.Labels = map[string]string{"foo": "bar"} md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Selector.MatchLabels = map[string]string{"foo": "bar"} tests := []struct { @@ -2230,7 +2230,7 @@ func TestReconcileMachineDeployments(t *testing.T) { if wantMachineDeploymentState.Object.Name != gotMachineDeployment.Name { continue } - currentMachineDeploymentTopologyName := wantMachineDeploymentState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] + currentMachineDeploymentTopologyName := wantMachineDeploymentState.Object.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] currentMachineDeploymentState := currentMachineDeploymentStates[currentMachineDeploymentTopologyName] // Copy over the name of the newly created InfrastructureRef and Bootsrap.ConfigRef because they get a generated name @@ -2495,9 +2495,9 @@ func TestReconcileMachinePools(t *testing.T) { infrastructureMachinePool9m := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-9m").Build() bootstrapConfig9m := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-9m").Build() mp9 := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m) - mp9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} + 
mp9.Object.Spec.Template.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"} mp9WithInstanceSpecificTemplateMetadata := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m) - mp9WithInstanceSpecificTemplateMetadata.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"} + mp9WithInstanceSpecificTemplateMetadata.Object.Spec.Template.Labels = map[string]string{"foo": "bar"} tests := []struct { name string @@ -2686,7 +2686,7 @@ func TestReconcileMachinePools(t *testing.T) { if wantMachinePoolState.Object.Name != gotMachinePool.Name { continue } - currentMachinePoolTopologyName := wantMachinePoolState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] + currentMachinePoolTopologyName := wantMachinePoolState.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] currentMachinePoolState := currentMachinePoolStates[currentMachinePoolTopologyName] // Copy over the name of the newly created InfrastructureRef and Bootsrap.ConfigRef because they get a generated name diff --git a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go index 69aa5c4050f7..1c52d8155d9b 100644 --- a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go +++ b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go @@ -816,7 +816,7 @@ func TestServerSideApplyWithDefaulting(t *testing.T) { // It also calculates and returns the corresponding MutatingWebhookConfiguration. // Note: To activate the webhook, the MutatingWebhookConfiguration has to be deployed. func setupWebhookWithManager(ns *corev1.Namespace) (*KubeadmConfigTemplateTestDefaulter, *admissionv1.MutatingWebhookConfiguration, error) { - webhookServer := env.Manager.GetWebhookServer().(*webhook.DefaultServer) + webhookServer := env.GetWebhookServer().(*webhook.DefaultServer) // Calculate webhook host and path. // Note: This is done the same way as in our envtest package. @@ -830,7 +830,7 @@ func setupWebhookWithManager(ns *corev1.Namespace) (*KubeadmConfigTemplateTestDe // Note: This should only ever be called once with the same path, otherwise we get a panic. defaulter := &KubeadmConfigTemplateTestDefaulter{} webhookServer.Register(webhookPath, - admission.WithCustomDefaulter(env.Manager.GetScheme(), &bootstrapv1.KubeadmConfigTemplate{}, defaulter)) + admission.WithCustomDefaulter(env.GetScheme(), &bootstrapv1.KubeadmConfigTemplate{}, defaulter)) // Calculate the MutatingWebhookConfiguration caBundle, err := os.ReadFile(filepath.Join(webhookServer.Options.CertDir, webhookServer.Options.CertName)) diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go index ebe4e6405c5a..6c4a11058f5d 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineDeployment/%s", req.NamespacedName.Name) + return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineDeployment/%s", req.Name) } log = log.WithValues("Cluster", klog.KRef(md.Namespace, md.Spec.ClusterName)) @@ -164,7 +164,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re }() // Handle deletion reconciliation loop. - if !md.ObjectMeta.DeletionTimestamp.IsZero() { + if !md.DeletionTimestamp.IsZero() { return ctrl.Result{}, r.reconcileDelete(ctx, md) } diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go index a9d286f96eef..7578fdd0c5ea 100644 --- a/internal/controllers/topology/machineset/machineset_controller.go +++ b/internal/controllers/topology/machineset/machineset_controller.go @@ -124,7 +124,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, nil } // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineSet/%s", req.NamespacedName.Name) + return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineSet/%s", req.Name) } log := ctrl.LoggerFrom(ctx).WithValues("Cluster", klog.KRef(ms.Namespace, ms.Spec.ClusterName)) @@ -171,7 +171,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re }() // Handle deletion reconciliation loop. - if !ms.ObjectMeta.DeletionTimestamp.IsZero() { + if !ms.DeletionTimestamp.IsZero() { return ctrl.Result{}, r.reconcileDelete(ctx, ms) } diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go index eaba04ceea04..d90e1bec1bb0 100644 --- a/internal/test/envtest/environment.go +++ b/internal/test/envtest/environment.go @@ -389,11 +389,11 @@ func newEnvironment(managerCacheOptions cache.Options, uncachedObjs ...client.Ob func (e *Environment) start(ctx context.Context) { go func() { fmt.Println("Starting the test environment manager") - if err := e.Manager.Start(ctx); err != nil { + if err := e.Start(ctx); err != nil { panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) } }() - <-e.Manager.Elected() + <-e.Elected() e.waitForWebhooks() } @@ -434,7 +434,7 @@ func (e *Environment) CreateKubeconfigSecret(ctx context.Context, cluster *clust func (e *Environment) Cleanup(ctx context.Context, objs ...client.Object) error { errs := []error{} for _, o := range objs { - err := e.Client.Delete(ctx, o) + err := e.Delete(ctx, o) if apierrors.IsNotFound(err) { continue } @@ -481,7 +481,7 @@ func (e *Environment) CleanupAndWait(ctx context.Context, objs ...client.Object) // // NOTE: Waiting for the cache to be updated helps in preventing test flakes due to the cache sync delays. func (e *Environment) CreateAndWait(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { - if err := e.Client.Create(ctx, obj, opts...); err != nil { + if err := e.Create(ctx, obj, opts...); err != nil { return err } @@ -518,7 +518,7 @@ func (e *Environment) PatchAndWait(ctx context.Context, obj client.Object, opts // Store old resource version, empty string if not found. 
oldResourceVersion := objCopy.GetResourceVersion() - if err := e.Client.Patch(ctx, obj, client.Apply, opts...); err != nil { + if err := e.Patch(ctx, obj, client.Apply, opts...); err != nil { return err } @@ -552,7 +552,7 @@ func (e *Environment) CreateNamespace(ctx context.Context, generateName string) }, }, } - if err := e.Client.Create(ctx, ns); err != nil { + if err := e.Create(ctx, ns); err != nil { return nil, err } diff --git a/internal/util/ssa/cache.go b/internal/util/ssa/cache.go index c706940fb075..0587e893358f 100644 --- a/internal/util/ssa/cache.go +++ b/internal/util/ssa/cache.go @@ -92,7 +92,7 @@ func (r *ssaCache) Add(key string) { // Note: keys expire after the ttl. func (r *ssaCache) Has(key, kind string) bool { // Note: We can ignore the error here because GetByKey never returns an error. - _, exists, _ := r.Store.GetByKey(key) + _, exists, _ := r.GetByKey(key) if exists { cacheHits.WithLabelValues(kind, r.controllerName).Inc() } else { diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 03db7f54e2eb..79bf6e323198 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -119,18 +119,18 @@ func (webhook *Cluster) Default(ctx context.Context, obj runtime.Object) error { } if cluster.Spec.Topology.ControlPlane.MachineHealthCheck != nil && - cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate != nil && - cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate.Namespace == "" { - cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate.Namespace = cluster.Namespace + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.RemediationTemplate != nil && + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.RemediationTemplate.Namespace == "" { + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.RemediationTemplate.Namespace = cluster.Namespace } if cluster.Spec.Topology.Workers != nil { for i := range cluster.Spec.Topology.Workers.MachineDeployments { md := cluster.Spec.Topology.Workers.MachineDeployments[i] if md.MachineHealthCheck != nil && - md.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate != nil && - md.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate.Namespace == "" { - md.MachineHealthCheck.MachineHealthCheckClass.RemediationTemplate.Namespace = cluster.Namespace + md.MachineHealthCheck.RemediationTemplate != nil && + md.MachineHealthCheck.RemediationTemplate.Namespace == "" { + md.MachineHealthCheck.RemediationTemplate.Namespace = cluster.Namespace } } } @@ -358,7 +358,7 @@ func (webhook *Cluster) validateTopology(ctx context.Context, oldCluster, newClu // Get the ClusterClass referenced in the Cluster. clusterClass, warnings, clusterClassPollErr := webhook.validateClusterClassExistsAndIsReconciled(ctx, newCluster) // If the error is anything other than "NotFound" or "NotReconciled" return all errors. - if clusterClassPollErr != nil && !(apierrors.IsNotFound(clusterClassPollErr) || errors.Is(clusterClassPollErr, errClusterClassNotReconciled)) { + if clusterClassPollErr != nil && (!apierrors.IsNotFound(clusterClassPollErr) && !errors.Is(clusterClassPollErr, errClusterClassNotReconciled)) { allErrs = append( allErrs, field.InternalError( fldPath.Child("class"), @@ -536,7 +536,7 @@ func validateTopologyControlPlaneVersion(ctx context.Context, ctrlClient client. 
return errors.Wrapf(err, "failed to check if control plane is upgrading: failed to parse control plane version %s", *cpVersionString) } if cpVersion.NE(oldVersion) { - return fmt.Errorf("Cluster.spec.topology.version %s was not propagated to control plane yet (control plane version %s)", oldVersion, cpVersion) //nolint:stylecheck // capitalization is intentional + return fmt.Errorf("cluster.spec.topology.version %s was not propagated to control plane yet (control plane version %s)", oldVersion, cpVersion) // capitalization is intentional } provisioning, err := contract.ControlPlane().IsProvisioning(cp) @@ -674,7 +674,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust fldPath := field.NewPath("spec", "topology", "controlPlane", "machineHealthCheck") // Validate ControlPlane MachineHealthCheck if defined. - if !cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + if !cluster.Spec.Topology.ControlPlane.MachineHealthCheck.IsZero() { // Ensure ControlPlane does not define a MachineHealthCheck if the ClusterClass does not define MachineInfrastructure. if clusterClass.Spec.ControlPlane.MachineInfrastructure == nil { allErrs = append(allErrs, field.Forbidden( @@ -693,7 +693,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust // Check if the machineHealthCheck is explicitly enabled in the ControlPlaneTopology. if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable != nil && *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable { // Ensure the MHC is defined in at least one of the ControlPlaneTopology of the Cluster or the ControlPlaneClass of the ClusterClass. - if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() && clusterClass.Spec.ControlPlane.MachineHealthCheck == nil { + if cluster.Spec.Topology.ControlPlane.MachineHealthCheck.IsZero() && clusterClass.Spec.ControlPlane.MachineHealthCheck == nil { allErrs = append(allErrs, field.Forbidden( fldPath.Child("enable"), fmt.Sprintf("cannot be set to %t as MachineHealthCheck definition is not available in the Cluster topology or the ClusterClass", *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable), @@ -709,7 +709,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust fldPath := field.NewPath("spec", "topology", "workers", "machineDeployments").Key(md.Name).Child("machineHealthCheck") // Validate the MachineDeployment MachineHealthCheck if defined. - if !md.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + if !md.MachineHealthCheck.IsZero() { allErrs = append(allErrs, validateMachineHealthCheckClass(fldPath, cluster.Namespace, &md.MachineHealthCheck.MachineHealthCheckClass)...) } @@ -722,7 +722,7 @@ func validateMachineHealthChecks(cluster *clusterv1.Cluster, clusterClass *clust // Check if the machineHealthCheck is explicitly enabled in the machineDeploymentTopology. if md.MachineHealthCheck.Enable != nil && *md.MachineHealthCheck.Enable { // Ensure the MHC is defined in at least one of the MachineDeploymentTopology of the Cluster or the MachineDeploymentClass of the ClusterClass. 
- if md.MachineHealthCheck.MachineHealthCheckClass.IsZero() && mdClass.MachineHealthCheck == nil { + if md.MachineHealthCheck.IsZero() && mdClass.MachineHealthCheck == nil { allErrs = append(allErrs, field.Forbidden( fldPath.Child("enable"), fmt.Sprintf("cannot be set to %t as MachineHealthCheck definition is not available in the Cluster topology or the ClusterClass", *md.MachineHealthCheck.Enable), diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index 5070beb23308..3e4c27c81217 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -240,7 +240,7 @@ func validateUpdatesToMachineHealthCheckClasses(clusters []clusterv1.Cluster, ol if cluster.Spec.Topology.ControlPlane.MachineHealthCheck != nil && cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable != nil && *cluster.Spec.Topology.ControlPlane.MachineHealthCheck.Enable && - cluster.Spec.Topology.ControlPlane.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + cluster.Spec.Topology.ControlPlane.MachineHealthCheck.IsZero() { clustersUsingMHC = append(clustersUsingMHC, cluster.Name) } } @@ -271,7 +271,7 @@ func validateUpdatesToMachineHealthCheckClasses(clusters []clusterv1.Cluster, ol if mdTopology.MachineHealthCheck != nil && mdTopology.MachineHealthCheck.Enable != nil && *mdTopology.MachineHealthCheck.Enable && - mdTopology.MachineHealthCheck.MachineHealthCheckClass.IsZero() { + mdTopology.MachineHealthCheck.IsZero() { clustersUsingMHC = append(clustersUsingMHC, cluster.Name) break } diff --git a/internal/webhooks/machinedeployment.go b/internal/webhooks/machinedeployment.go index c8868172bece..22636ae1c954 100644 --- a/internal/webhooks/machinedeployment.go +++ b/internal/webhooks/machinedeployment.go @@ -306,7 +306,7 @@ func (webhook *MachineDeployment) validate(oldMD, newMD *clusterv1.MachineDeploy } // Validate the metadata of the template. - allErrs = append(allErrs, newMD.Spec.Template.ObjectMeta.Validate(specPath.Child("template", "metadata"))...) + allErrs = append(allErrs, newMD.Spec.Template.Validate(specPath.Child("template", "metadata"))...) if len(allErrs) == 0 { return nil diff --git a/internal/webhooks/machineset.go b/internal/webhooks/machineset.go index 2307a3449e2e..0657ba33b3ab 100644 --- a/internal/webhooks/machineset.go +++ b/internal/webhooks/machineset.go @@ -186,7 +186,7 @@ func (webhook *MachineSet) validate(oldMS, newMS *clusterv1.MachineSet) error { allErrs, field.Invalid( specPath.Child("template", "metadata", "labels"), - newMS.Spec.Template.ObjectMeta.Labels, + newMS.Spec.Template.Labels, fmt.Sprintf("must match spec.selector %q", selector.String()), ), ) @@ -225,7 +225,7 @@ func (webhook *MachineSet) validate(oldMS, newMS *clusterv1.MachineSet) error { allErrs = append(allErrs, validateMSMachineNamingStrategy(newMS.Spec.MachineNamingStrategy, specPath.Child("machineNamingStrategy"))...) } // Validate the metadata of the template. - allErrs = append(allErrs, newMS.Spec.Template.ObjectMeta.Validate(specPath.Child("template", "metadata"))...) + allErrs = append(allErrs, newMS.Spec.Template.Validate(specPath.Child("template", "metadata"))...) 
if len(allErrs) == 0 { return nil diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index c3447084217d..1add9fe2f37e 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -165,10 +165,9 @@ func validateEnabledIf(enabledIf *string, path *field.Path) field.ErrorList { func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.ClusterClass, path *field.Path) field.ErrorList { var allErrs field.ErrorList - // Return an error if none of the possible selectors are enabled. - if !(selector.MatchResources.InfrastructureCluster || selector.MatchResources.ControlPlane || - (selector.MatchResources.MachineDeploymentClass != nil && len(selector.MatchResources.MachineDeploymentClass.Names) > 0) || - (selector.MatchResources.MachinePoolClass != nil && len(selector.MatchResources.MachinePoolClass.Names) > 0)) { + if !selector.MatchResources.InfrastructureCluster && !selector.MatchResources.ControlPlane && + (selector.MatchResources.MachineDeploymentClass == nil || len(selector.MatchResources.MachineDeploymentClass.Names) == 0) && + (selector.MatchResources.MachinePoolClass == nil || len(selector.MatchResources.MachinePoolClass.Names) == 0) { return append(allErrs, field.Invalid( path, @@ -292,7 +291,7 @@ func validateSelectorName(name string, path *field.Path, resourceName string, in } // the * rune can appear only at the beginning, or ending of the selector. - if strings.Contains(name, "*") && !(strings.HasPrefix(name, "*") || strings.HasSuffix(name, "*")) { + if strings.Contains((name), "*") && !strings.HasPrefix(name, "*") && !strings.HasSuffix(name, "*") { // templateMDClass or templateMPClass can only have "*" rune at the start or end of the string return field.Invalid( path.Child("matchResources", resourceName, "names").Index(index), diff --git a/main.go b/main.go index cdb7f76a1ecc..2b30ba71fa47 100644 --- a/main.go +++ b/main.go @@ -327,7 +327,7 @@ func main() { minVer = version.MinimumKubernetesVersionClusterTopology } - if !(remoteConditionsGracePeriod > remoteConnectionGracePeriod) { + if remoteConditionsGracePeriod <= remoteConnectionGracePeriod { setupLog.Error(errors.Errorf("--remote-conditions-grace-period must be greater than --remote-connection-grace-period"), "Unable to start manager") os.Exit(1) } diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index 72654cfce6a9..a69f9f7ca774 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -688,7 +688,7 @@ func beforeClusterDeleteHandler(ctx context.Context, c client.Client, cluster ty var blocked = true // If the Cluster is not found it has been deleted and the hook is unblocked. 
- if apierrors.IsNotFound(c.Get(ctx, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}, &clusterv1.Cluster{})) { + if apierrors.IsNotFound(c.Get(ctx, cluster, &clusterv1.Cluster{})) { blocked = false } return blocked diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go index 6a5f6242fe37..81df369293f0 100644 --- a/test/e2e/node_drain.go +++ b/test/e2e/node_drain.go @@ -278,7 +278,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo Namespace: "evictable-workload", NodeSelector: map[string]string{nodeOwnerLabelKey: "KubeadmControlPlane-" + controlplane.Name}, ModifyDeployment: func(deployment *appsv1.Deployment) { - deployment.Spec.Template.ObjectMeta.Finalizers = []string{"test.cluster.x-k8s.io/block"} + deployment.Spec.Template.Finalizers = []string{"test.cluster.x-k8s.io/block"} for k, v := range deploymentLabels { deployment.Spec.Template.Labels[k] = v } @@ -293,7 +293,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo Namespace: "evictable-workload", NodeSelector: map[string]string{nodeOwnerLabelKey: "MachineDeployment-" + md.Name}, ModifyDeployment: func(deployment *appsv1.Deployment) { - deployment.Spec.Template.ObjectMeta.Finalizers = []string{"test.cluster.x-k8s.io/block"} + deployment.Spec.Template.Finalizers = []string{"test.cluster.x-k8s.io/block"} for k, v := range deploymentLabels { deployment.Spec.Template.Labels[k] = v } diff --git a/test/e2e/scale.go b/test/e2e/scale.go index 657ce2434420..db7021db772b 100644 --- a/test/e2e/scale.go +++ b/test/e2e/scale.go @@ -349,7 +349,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { // then deploy the ClusterClass in this namespace. if !deployClusterInSeparateNamespaces || useCrossNamespaceClusterClass { if len(baseClusterClassYAML) > 0 { - clusterClassYAML := bytes.Replace(baseClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespace.Name), -1) + clusterClassYAML := bytes.ReplaceAll(baseClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespace.Name)) log.Logf("Apply ClusterClass") Eventually(func() error { return input.BootstrapClusterProxy.CreateOrUpdate(ctx, clusterClassYAML) @@ -359,7 +359,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { for i := range additionalClusterClassCount { additionalName := fmt.Sprintf("%s-%d", input.ClusterClassName, i+1) log.Logf("Apply additional ClusterClass %s/%s", namespace.Name, additionalName) - additionalClassYAML := bytes.Replace(clusterClassYAML, []byte(input.ClusterClassName), []byte(additionalName), -1) + additionalClassYAML := bytes.ReplaceAll(clusterClassYAML, []byte(scaleClusterNamePlaceholder), []byte(additionalName)) Eventually(func() error { return input.BootstrapClusterProxy.CreateOrUpdate(ctx, additionalClassYAML) }, 1*time.Minute).Should(Succeed()) @@ -722,7 +722,7 @@ func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProx // * Deploy ClusterClass in new namespace. 
if deployClusterInSeparateNamespaces && !enableCrossNamespaceClusterClass { log.Logf("Apply ClusterClass in namespace %s", namespaceName) - clusterClassYAML := bytes.Replace(customizedClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName), -1) + clusterClassYAML := bytes.ReplaceAll(customizedClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName)) Eventually(func() error { return clusterProxy.CreateOrUpdate(ctx, clusterClassYAML) }, 1*time.Minute).Should(Succeed()) @@ -731,7 +731,7 @@ func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProx for i := range additionalClusterClasses { additionalName := fmt.Sprintf("%s-%d", clusterClassName, i+1) log.Logf("Apply additional ClusterClass %s/%s", namespaceName, additionalName) - additionalClassYAML := bytes.Replace(clusterClassYAML, []byte(clusterClassName), []byte(additionalName), -1) + additionalClassYAML := bytes.ReplaceAll(clusterClassYAML, []byte(clusterClassName), []byte(additionalName)) Eventually(func() error { return clusterProxy.CreateOrUpdate(ctx, additionalClassYAML) }, 1*time.Minute).Should(Succeed()) @@ -742,12 +742,12 @@ func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProx clusterTemplateYAML := customizedClusterTemplateYAML if enableCrossNamespaceClusterClass { // Set classNamespace to the defaultNamespace where the ClusterClass is located. - clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, + clusterTemplateYAML = bytes.ReplaceAll(clusterTemplateYAML, []byte(fmt.Sprintf("classNamespace: %s", scaleClusterNamespacePlaceholder)), - []byte(fmt.Sprintf("classNamespace: %s", defaultNamespace)), -1) + []byte(fmt.Sprintf("classNamespace: %s", defaultNamespace))) } - clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName), -1) - clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, []byte(scaleClusterNamePlaceholder), []byte(clusterName), -1) + clusterTemplateYAML = bytes.ReplaceAll(clusterTemplateYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName)) + clusterTemplateYAML = bytes.ReplaceAll(clusterTemplateYAML, []byte(scaleClusterNamePlaceholder), []byte(clusterName)) // Deploy Cluster. 
create(ctx, namespaceName, clusterName, clusterTemplateYAML) diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go index 4654541a8fd4..e5616b165606 100644 --- a/test/framework/clusterctl/e2e_config.go +++ b/test/framework/clusterctl/e2e_config.go @@ -332,7 +332,7 @@ func resolveReleaseMarker(ctx context.Context, releaseMarker string, goproxyClie } gomodule := gomoduleParts[0] - includePrereleases := false + var includePrereleases bool if strings.HasPrefix(gomoduleParts[1], "latest-") { includePrereleases = true } diff --git a/test/framework/clusterctl/repository.go b/test/framework/clusterctl/repository.go index 99d4f651834c..ebcad9e2100d 100644 --- a/test/framework/clusterctl/repository.go +++ b/test/framework/clusterctl/repository.go @@ -121,7 +121,8 @@ func CreateRepository(ctx context.Context, input CreateRepositoryInput) string { Type: provider.Type, } providers = append(providers, p) - if !(clusterctlv1.ProviderType(provider.Type) == clusterctlv1.IPAMProviderType || clusterctlv1.ProviderType(provider.Type) == clusterctlv1.RuntimeExtensionProviderType || clusterctlv1.ProviderType(provider.Type) == clusterctlv1.AddonProviderType) { + providerType := clusterctlv1.ProviderType(provider.Type) + if providerType != clusterctlv1.IPAMProviderType && providerType != clusterctlv1.RuntimeExtensionProviderType && providerType != clusterctlv1.AddonProviderType { providersV1_2 = append(providersV1_2, p) } } @@ -196,7 +197,7 @@ func AdjustConfigPathForBinary(clusterctlBinaryPath, clusterctlConfigPath string Expect(err).ToNot(HaveOccurred()) if version.LT(semver.MustParse("1.3.0")) { - return strings.Replace(clusterctlConfigPath, clusterctlConfigFileName, clusterctlConfigV1_2FileName, -1) + return strings.ReplaceAll(clusterctlConfigPath, clusterctlConfigFileName, clusterctlConfigV1_2FileName) } return clusterctlConfigPath } diff --git a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go index 42fa046c7460..95e2122ff99a 100644 --- a/test/framework/controlplane_helpers.go +++ b/test/framework/controlplane_helpers.go @@ -354,10 +354,10 @@ func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeCont } if input.EtcdImageTag != "" { - input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageMeta.ImageTag = input.EtcdImageTag + input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ImageTag = input.EtcdImageTag } if input.DNSImageTag != "" { - input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageMeta.ImageTag = input.DNSImageTag + input.ControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.DNS.ImageTag = input.DNSImageTag } Eventually(func() error { diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go index 948c376ccd50..f028e0c7c6b7 100644 --- a/test/framework/ownerreference_helpers.go +++ b/test/framework/ownerreference_helpers.go @@ -356,7 +356,7 @@ func HasExactOwners(gotOwners []metav1.OwnerReference, wantOwners ...metav1.Owne } func ownerReferenceString(ref metav1.OwnerReference) string { - controller := false + var controller bool if ref.Controller != nil && *ref.Controller { controller = true } diff --git a/test/infrastructure/container/docker.go b/test/infrastructure/container/docker.go index 53c7e1c7ca3c..e19c9837f4d2 100644 --- a/test/infrastructure/container/docker.go +++ b/test/infrastructure/container/docker.go @@ -538,8 +538,8 @@ func (d *dockerRuntime) RunContainer(ctx context.Context, runConfig 
*RunContaine return fmt.Errorf("error inspecting container %s: %v", resp.ID, err) } - if containerJSON.ContainerJSONBase.State.ExitCode != 0 { - return fmt.Errorf("error container run failed with exit code %d", containerJSON.ContainerJSONBase.State.ExitCode) + if containerJSON.State.ExitCode != 0 { + return fmt.Errorf("error container run failed with exit code %d", containerJSON.State.ExitCode) } return nil diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index d1513a4917d7..1e3310e27694 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -136,7 +136,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re }() // Handle deleted machines - if !dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero() { + if !dockerMachinePool.DeletionTimestamp.IsZero() { return ctrl.Result{}, r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool) } diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go index 6c44876f4cc0..b27182c441ad 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockercluster_backend.go @@ -173,7 +173,7 @@ func (r *ClusterBackEndReconciler) PatchDevCluster(ctx context.Context, patchHel v1beta1conditions.WithConditions( infrav1.LoadBalancerAvailableCondition, ), - v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounterIf(dockerCluster.DeletionTimestamp.IsZero()), ) if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition, conditions.ForConditionTypes{ diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index 9193739c5d95..2d0b4f5c8d78 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -283,7 +283,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster return default: updatedDockerMachine := &infrav1.DockerMachine{} - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil && + if err := r.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil && !updatedDockerMachine.DeletionTimestamp.IsZero() { log.Info("Cancelling Bootstrap because the underlying machine has been deleted") cancel() @@ -462,7 +462,7 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel infrav1.ContainerProvisionedCondition, infrav1.BootstrapExecSucceededCondition, ), - v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), + v1beta1conditions.WithStepCounterIf(dockerMachine.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil), ) if err := conditions.SetSummaryCondition(dockerMachine, dockerMachine, 
infrav1.DevMachineReadyV1Beta2Condition, conditions.ForConditionTypes{ @@ -506,7 +506,7 @@ func (r *MachineBackendReconciler) reconcileLoadBalancerConfiguration(ctx contex controlPlaneWeight := map[string]int{} controlPlaneMachineList := &clusterv1.MachineList{} - if err := r.Client.List(ctx, controlPlaneMachineList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ + if err := r.List(ctx, controlPlaneMachineList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ clusterv1.MachineControlPlaneLabel: "", clusterv1.ClusterNameLabel: cluster.Name, }); err != nil { @@ -534,7 +534,7 @@ func (r *MachineBackendReconciler) reconcileLoadBalancerConfiguration(ctx contex func (r *MachineBackendReconciler) getBootstrapData(ctx context.Context, namespace string, dataSecretName string) (string, bootstrapv1.Format, error) { s := &corev1.Secret{} key := client.ObjectKey{Namespace: namespace, Name: dataSecretName} - if err := r.Client.Get(ctx, key, s); err != nil { + if err := r.Get(ctx, key, s); err != nil { return "", "", errors.Wrapf(err, "failed to retrieve bootstrap data secret %s", dataSecretName) } diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go index 03a27898b411..8f75092878d7 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorycluster_backend.go @@ -47,7 +47,7 @@ type ClusterBackendReconciler struct { // NOTE: This is done at best effort in order to make iterative development workflow easier. func (r *ClusterBackendReconciler) HotRestart(ctx context.Context) error { inMemoryClusterList := &infrav1.DevClusterList{} - if err := r.Client.List(ctx, inMemoryClusterList); err != nil { + if err := r.List(ctx, inMemoryClusterList); err != nil { return err } diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index e9e419dc1375..64802930b64c 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -1250,7 +1250,7 @@ func (r *MachineBackendReconciler) PatchDevMachine(ctx context.Context, patchHel // A step counter is added to represent progress during the provisioning process (instead we are hiding the step counter during the deletion process). 
 	v1beta1conditions.SetSummary(inMemoryMachine,
 		v1beta1conditions.WithConditions(inMemoryMachineConditions...),
-		v1beta1conditions.WithStepCounterIf(inMemoryMachine.ObjectMeta.DeletionTimestamp.IsZero() && inMemoryMachine.Spec.ProviderID == nil),
+		v1beta1conditions.WithStepCounterIf(inMemoryMachine.DeletionTimestamp.IsZero() && inMemoryMachine.Spec.ProviderID == nil),
 	)
 	if err := conditions.SetSummaryCondition(inMemoryMachine, inMemoryMachine, infrav1.DevMachineReadyV1Beta2Condition,
 		inMemoryMachineV1Beta2Conditions,
diff --git a/test/infrastructure/docker/internal/controllers/devcluster_controller.go b/test/infrastructure/docker/internal/controllers/devcluster_controller.go
index 6d47abda1f74..764cee67c3df 100644
--- a/test/infrastructure/docker/internal/controllers/devcluster_controller.go
+++ b/test/infrastructure/docker/internal/controllers/devcluster_controller.go
@@ -96,7 +96,7 @@ func (r *DevClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	// Fetch the DevCluster instance
 	devCluster := &infrav1.DevCluster{}
-	if err := r.Client.Get(ctx, req.NamespacedName, devCluster); err != nil {
+	if err := r.Get(ctx, req.NamespacedName, devCluster); err != nil {
 		if apierrors.IsNotFound(err) {
 			return ctrl.Result{}, nil
 		}
diff --git a/test/infrastructure/docker/internal/controllers/devmachine_controller.go b/test/infrastructure/docker/internal/controllers/devmachine_controller.go
index 375721ac0f2d..8706f336b8e6 100644
--- a/test/infrastructure/docker/internal/controllers/devmachine_controller.go
+++ b/test/infrastructure/docker/internal/controllers/devmachine_controller.go
@@ -114,7 +114,7 @@ func (r *DevMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	// Fetch the DevMachine instance.
 	devMachine := &infrav1.DevMachine{}
-	if err := r.Client.Get(ctx, req.NamespacedName, devMachine); err != nil {
+	if err := r.Get(ctx, req.NamespacedName, devMachine); err != nil {
 		if apierrors.IsNotFound(err) {
 			return ctrl.Result{}, nil
 		}
@@ -181,7 +181,7 @@ func (r *DevMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		Namespace: devMachine.Namespace,
 		Name:      cluster.Spec.InfrastructureRef.Name,
 	}
-	if err := r.Client.Get(ctx, devClusterName, devCluster); err != nil {
+	if err := r.Get(ctx, devClusterName, devCluster); err != nil {
 		log.Info("DevCluster is not available yet")
 		return ctrl.Result{}, nil
 	}
@@ -196,7 +196,7 @@ func (r *DevMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}()
 
 	// Handle deleted machines
-	if !devMachine.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !devMachine.DeletionTimestamp.IsZero() {
 		return backendReconciler.ReconcileDelete(ctx, cluster, devCluster, machine, devMachine)
 	}
 
@@ -238,7 +238,7 @@ func (r *DevMachineReconciler) DevClusterToDevMachines(ctx context.Context, o cl
 	labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name}
 
 	machineList := &clusterv1.MachineList{}
-	if err := r.Client.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
+	if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
 		return nil
 	}
 	for _, m := range machineList.Items {
diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go
index 04deffc9dea5..90339e7dc535 100644
--- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go
+++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go
@@ -63,7 +63,7 @@ func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 
 	// Fetch the DockerCluster instance
 	dockerCluster := &infrav1.DockerCluster{}
-	if err := r.Client.Get(ctx, req.NamespacedName, dockerCluster); err != nil {
+	if err := r.Get(ctx, req.NamespacedName, dockerCluster); err != nil {
 		if apierrors.IsNotFound(err) {
 			return ctrl.Result{}, nil
 		}
@@ -157,7 +157,7 @@ func patchDockerCluster(ctx context.Context, patchHelper *patch.Helper, dockerCl
 		v1beta1conditions.WithConditions(
 			infrav1.LoadBalancerAvailableCondition,
 		),
-		v1beta1conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()),
+		v1beta1conditions.WithStepCounterIf(dockerCluster.DeletionTimestamp.IsZero()),
 	)
 	if err := conditions.SetSummaryCondition(dockerCluster, dockerCluster, infrav1.DevClusterReadyV1Beta2Condition,
 		conditions.ForConditionTypes{
diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
index cca27948aa1b..6da970ee9772 100644
--- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
+++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
@@ -68,7 +68,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 
 	// Fetch the DockerMachine instance.
 	dockerMachine := &infrav1.DockerMachine{}
-	if err := r.Client.Get(ctx, req.NamespacedName, dockerMachine); err != nil {
+	if err := r.Get(ctx, req.NamespacedName, dockerMachine); err != nil {
 		if apierrors.IsNotFound(err) {
 			return ctrl.Result{}, nil
 		}
@@ -120,7 +120,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		Namespace: dockerMachine.Namespace,
 		Name:      cluster.Spec.InfrastructureRef.Name,
 	}
-	if err := r.Client.Get(ctx, dockerClusterName, dockerCluster); err != nil {
+	if err := r.Get(ctx, dockerClusterName, dockerCluster); err != nil {
 		log.Info("DockerCluster is not available yet")
 		return ctrl.Result{}, nil
 	}
@@ -155,7 +155,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	}()
 
 	// Handle deleted machines
-	if !devMachine.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !devMachine.DeletionTimestamp.IsZero() {
 		return r.backendReconciler.ReconcileDelete(ctx, cluster, devCluster, machine, devMachine)
 	}
 
@@ -231,7 +231,7 @@ func (r *DockerMachineReconciler) dockerClusterToDockerMachines(ctx context.Cont
 	labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name}
 
 	machineList := &clusterv1.MachineList{}
-	if err := r.Client.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
+	if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
 		return nil
 	}
 	for _, m := range machineList.Items {
@@ -253,7 +253,7 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa
 			infrav1.ContainerProvisionedCondition,
 			infrav1.BootstrapExecSucceededCondition,
 		),
-		v1beta1conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil),
+		v1beta1conditions.WithStepCounterIf(dockerMachine.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil),
 	)
 	if err := conditions.SetSummaryCondition(dockerMachine, dockerMachine, infrav1.DevMachineReadyV1Beta2Condition,
 		conditions.ForConditionTypes{
diff --git a/test/infrastructure/docker/internal/provisioning/ignition/kindadapter.go b/test/infrastructure/docker/internal/provisioning/ignition/kindadapter.go
index 7f01e86ddde7..2486ed95b813 100644
--- a/test/infrastructure/docker/internal/provisioning/ignition/kindadapter.go
+++ b/test/infrastructure/docker/internal/provisioning/ignition/kindadapter.go
@@ -124,7 +124,7 @@ func hackKubeadmIgnoreErrors(s string) string {
 	lines := strings.Split(s, "\n")
 
 	for idx, line := range lines {
-		if !(strings.Contains(line, "kubeadm init") || strings.Contains(line, "kubeadm join")) {
+		if !strings.Contains(line, "kubeadm init") && !strings.Contains(line, "kubeadm join") {
 			continue
 		}
 
diff --git a/test/infrastructure/kind/mapper.go b/test/infrastructure/kind/mapper.go
index f39eaf9919e2..f99f4af33496 100644
--- a/test/infrastructure/kind/mapper.go
+++ b/test/infrastructure/kind/mapper.go
@@ -537,7 +537,7 @@ func GetMapping(k8sVersion semver.Version, customImage string) Mapping {
 	}
 	for _, m := range preBuiltMappings {
 		// If the mapping isn't for the right Major/Minor, ignore it.
-		if !(k8sVersion.Major == m.KubernetesVersion.Major && k8sVersion.Minor == m.KubernetesVersion.Minor) {
+		if k8sVersion.Major != m.KubernetesVersion.Major || k8sVersion.Minor != m.KubernetesVersion.Minor {
 			continue
 		}
 
diff --git a/util/cache/cache.go b/util/cache/cache.go
index 31ec3e2c17d0..353ea170d3dd 100644
--- a/util/cache/cache.go
+++ b/util/cache/cache.go
@@ -89,7 +89,7 @@ func (r *cache[E]) Add(entry E) {
 // Note: entries expire after the ttl.
 func (r *cache[E]) Has(key string) (E, bool) {
 	// Note: We can ignore the error here because GetByKey never returns an error.
-	item, exists, _ := r.Store.GetByKey(key)
+	item, exists, _ := r.GetByKey(key)
 	if exists {
 		return item.(E), true
 	}
diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go
index 0caea18dd71b..2e2f6b810934 100644
--- a/util/collections/machine_filters_test.go
+++ b/util/collections/machine_filters_test.go
@@ -567,7 +567,7 @@ func testControlPlaneMachine(name string) *clusterv1.Machine {
 		},
 	}
 	controlPlaneMachine := testMachine(name)
-	controlPlaneMachine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = ""
+	controlPlaneMachine.Labels[clusterv1.MachineControlPlaneLabel] = ""
 	controlPlaneMachine.OwnerReferences = ownedRef
 
 	return controlPlaneMachine
diff --git a/util/conditions/deprecated/v1beta1/unstructured.go b/util/conditions/deprecated/v1beta1/unstructured.go
index 3abfd02fc717..aa3fc9e6ef0e 100644
--- a/util/conditions/deprecated/v1beta1/unstructured.go
+++ b/util/conditions/deprecated/v1beta1/unstructured.go
@@ -77,7 +77,7 @@ func (c *unstructuredWrapper) SetV1Beta1Conditions(conditions clusterv1.Conditio
 	}
 	// unstructured.SetNestedField returns an error only if value cannot be set because one of
 	// the nesting levels is not a map[string]interface{}; this is not the case so the error should never happen here.
-	err := unstructured.SetNestedField(c.Unstructured.Object, v, "status", "conditions")
+	err := unstructured.SetNestedField(c.Object, v, "status", "conditions")
 	if err != nil {
 		log.Log.Error(err, "Failed to set Conditions on unstructured object. This error shouldn't have occurred, please file an issue.", "groupVersionKind", c.GroupVersionKind(), "name", c.GetName(), "namespace", c.GetNamespace())
 	}
diff --git a/util/patch/patch.go b/util/patch/patch.go
index cecd9bd90570..d187939cb9ef 100644
--- a/util/patch/patch.go
+++ b/util/patch/patch.go
@@ -180,7 +180,7 @@ func (h *Helper) Patch(ctx context.Context, obj client.Object, opts ...Option) e
 	}
 
 	if err := h.patchStatus(ctx, obj); err != nil {
-		if !(apierrors.IsNotFound(err) && !obj.GetDeletionTimestamp().IsZero() && len(obj.GetFinalizers()) == 0) {
+		if !apierrors.IsNotFound(err) || obj.GetDeletionTimestamp().IsZero() || len(obj.GetFinalizers()) != 0 {
 			errs = append(errs, err)
 		}
 	}
diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go
index de6502ac3f48..34197f24cfb7 100644
--- a/util/patch/patch_test.go
+++ b/util/patch/patch_test.go
@@ -873,7 +873,7 @@ func TestPatchHelper(t *testing.T) {
 				obj.Status.ReadyReplicas = ptr.To[int32](6)
 
 				t.Log("Updating the object metadata")
-				obj.ObjectMeta.Annotations = map[string]string{
+				obj.Annotations = map[string]string{
 					"test1": "annotation",
 				}
diff --git a/util/test/builder/builders.go b/util/test/builder/builders.go
index 282ff27e1198..d98c802fcd45 100644
--- a/util/test/builder/builders.go
+++ b/util/test/builder/builders.go
@@ -1986,7 +1986,7 @@ func (m *MachineBuilder) Build() *clusterv1.Machine {
 		if len(m.labels) == 0 {
 			machine.Labels = map[string]string{}
 		}
-		machine.ObjectMeta.Labels[clusterv1.ClusterNameLabel] = m.clusterName
+		machine.Labels[clusterv1.ClusterNameLabel] = m.clusterName
 	}
 	return machine
 }
diff --git a/util/util.go b/util/util.go
index 2392825ae4b1..97b9467bf9d4 100644
--- a/util/util.go
+++ b/util/util.go
@@ -133,7 +133,7 @@ func GetMachineIfExists(ctx context.Context, c client.Client, namespace, name st
 
 // IsControlPlaneMachine checks machine is a control plane node.
 func IsControlPlaneMachine(machine *clusterv1.Machine) bool {
-	_, ok := machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel]
+	_, ok := machine.Labels[clusterv1.MachineControlPlaneLabel]
 	return ok
 }