
Commit 8c30949

Merge pull request #666 from salasberryfin/workflows-bump-golangci-lint
chore: bump golangci-lint-action to v8.0.0
2 parents eadfd02 + 567e5c7, commit 8c30949

File tree

6 files changed (+132, -132 lines)


.github/workflows/golangci-lint.yml

Lines changed: 2 additions & 2 deletions
@@ -22,9 +22,9 @@ jobs:
         with:
           go-version: 1.23.0
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v7.0.0
+        uses: golangci/golangci-lint-action@v8.0.0
         with:
-          version: v2.0.2
+          version: v2.1.0
           working-directory: ${{matrix.working-directory}}
           args: --timeout=5m0s
           skip-cache: true
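
Note: golangci-lint-action v8.0.0 requires golangci-lint v2.1.0 or newer, which is why the action and the pinned linter version are bumped together. v2.1.0 in turn introduced the funcorder linter, and the pure function-reordering changes in the Go files below appear consistent with its rules: constructors directly after their struct declaration, exported methods before unexported ones. A hypothetical v2-format .golangci.yml enabling that linter (the repository's actual linter configuration is not part of this diff):

version: "2"
linters:
  enable:
    - funcorder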

bootstrap/internal/controllers/rke2config_scope.go

Lines changed: 24 additions & 24 deletions
@@ -54,30 +54,6 @@ type Scope struct {
     ControlPlane *controlplanev1.RKE2ControlPlane
 }
 
-// HasMachineOwner returns true if the RKE2Config is owned by a Machine.
-func (s *Scope) HasMachineOwner() bool {
-    return s.Machine != nil
-}
-
-// HasMachinePoolOwner returns true if the RKE2Config is owned by a MachinePool.
-func (s *Scope) HasMachinePoolOwner() bool {
-    return s.MachinePool != nil
-}
-
-// HasControlPlaneOwner returns true if the RKE2Config is owned by a Machine which is also a ControlPlane.
-func (s *Scope) HasControlPlaneOwner() bool {
-    return s.Machine != nil && s.ControlPlane != nil
-}
-
-// GetDesiredVersion returns the K8S version associated to the RKE2Config owner.
-func (s *Scope) GetDesiredVersion() string {
-    if s.MachinePool != nil {
-        return *s.MachinePool.Spec.Template.Spec.Version
-    }
-
-    return *s.Machine.Spec.Version
-}
-
 // NewScope initializes the RKE2Config scope given a new request.
 func NewScope(ctx context.Context, req ctrl.Request, client client.Client) (*Scope, error) {
     logger := log.FromContext(ctx)

@@ -150,3 +126,27 @@ func NewScope(ctx context.Context, req ctrl.Request, client client.Client) (*Scope, error) {
         Cluster: cluster,
     }, nil
 }
+
+// HasMachineOwner returns true if the RKE2Config is owned by a Machine.
+func (s *Scope) HasMachineOwner() bool {
+    return s.Machine != nil
+}
+
+// HasMachinePoolOwner returns true if the RKE2Config is owned by a MachinePool.
+func (s *Scope) HasMachinePoolOwner() bool {
+    return s.MachinePool != nil
+}
+
+// HasControlPlaneOwner returns true if the RKE2Config is owned by a Machine which is also a ControlPlane.
+func (s *Scope) HasControlPlaneOwner() bool {
+    return s.Machine != nil && s.ControlPlane != nil
+}
+
+// GetDesiredVersion returns the K8S version associated to the RKE2Config owner.
+func (s *Scope) GetDesiredVersion() string {
+    if s.MachinePool != nil {
+        return *s.MachinePool.Spec.Template.Spec.Version
+    }
+
+    return *s.Machine.Spec.Version
+}
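
This hunk pair is a pure move: the owner helpers relocate below the NewScope constructor, with no code changes. A minimal sketch of the declaration order the file now follows, using a hypothetical type rather than repository code:

package example

import "strings"

// Widget is a hypothetical type, used only to illustrate declaration order.
type Widget struct {
    name string
}

// NewWidget is the constructor; it sits immediately after the struct it builds.
func NewWidget(name string) *Widget {
    return &Widget{name: name}
}

// Name is exported; exported methods come before unexported ones.
func (w *Widget) Name() string {
    return w.name
}

// normalize is unexported, so it is declared last.
func (w *Widget) normalize() string {
    return strings.ToLower(w.name)
}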

controlplane/internal/controllers/rke2controlplane_controller.go

Lines changed: 22 additions & 22 deletions
@@ -308,6 +308,28 @@ func (r *RKE2ControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c
     return nil
 }
 
+// ClusterToRKE2ControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
+// for RKE2ControlPlane based on updates to a Cluster.
+func (r *RKE2ControlPlaneReconciler) ClusterToRKE2ControlPlane(ctx context.Context) handler.MapFunc {
+    log := log.FromContext(ctx)
+
+    return func(_ context.Context, o client.Object) []ctrl.Request {
+        c, ok := o.(*clusterv1.Cluster)
+        if !ok {
+            log.Error(nil, fmt.Sprintf("Expected a Cluster but got a %T", o))
+
+            return nil
+        }
+
+        controlPlaneRef := c.Spec.ControlPlaneRef
+        if controlPlaneRef != nil && controlPlaneRef.Kind == "RKE2ControlPlane" {
+            return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
+        }
+
+        return nil
+    }
+}
+
 // nolint:gocyclo
 func (r *RKE2ControlPlaneReconciler) updateStatus(ctx context.Context, rcp *controlplanev1.RKE2ControlPlane, cluster *clusterv1.Cluster) error {
     logger := log.FromContext(ctx)

@@ -1040,28 +1062,6 @@ func (r *RKE2ControlPlaneReconciler) upgradeControlPlane(
     }
 }
 
-// ClusterToRKE2ControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
-// for RKE2ControlPlane based on updates to a Cluster.
-func (r *RKE2ControlPlaneReconciler) ClusterToRKE2ControlPlane(ctx context.Context) handler.MapFunc {
-    log := log.FromContext(ctx)
-
-    return func(_ context.Context, o client.Object) []ctrl.Request {
-        c, ok := o.(*clusterv1.Cluster)
-        if !ok {
-            log.Error(nil, fmt.Sprintf("Expected a Cluster but got a %T", o))
-
-            return nil
-        }
-
-        controlPlaneRef := c.Spec.ControlPlaneRef
-        if controlPlaneRef != nil && controlPlaneRef.Kind == "RKE2ControlPlane" {
-            return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
-        }
-
-        return nil
-    }
-}
-
 func (r *RKE2ControlPlaneReconciler) reconcilePreTerminateHook(ctx context.Context, controlPlane *rke2.ControlPlane) (ctrl.Result, error) {
     // Ensure that every active machine has the drain hook set
     patchHookAnnotation := false
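
Again a pure move: ClusterToRKE2ControlPlane shifts up next to SetupWithManager, the natural consumer of a handler.MapFunc like this. A hedged sketch of how such a mapper is typically wired with the controller-runtime builder (SetupWithManager's real body is not shown in this diff; imports are assumed to match the file above):

// Hypothetical wiring, not the repository's actual SetupWithManager.
func (r *RKE2ControlPlaneReconciler) setupWatches(ctx context.Context, mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&controlplanev1.RKE2ControlPlane{}).
        // Requeue the referenced RKE2ControlPlane whenever its owning Cluster changes.
        Watches(&clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.ClusterToRKE2ControlPlane(ctx))).
        Complete(r)
}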

pkg/proxy/addr.go

Lines changed: 8 additions & 8 deletions
@@ -34,6 +34,14 @@ type Addr struct {
     identifier uint32
 }
 
+// NewAddrFromConn creates an Addr from the given connection.
+func NewAddrFromConn(c *Conn) Addr {
+    return Addr{
+        port:       c.stream.Headers().Get(corev1.PortHeader),
+        identifier: c.stream.Identifier(),
+    }
+}
+
 // Network returns a fake network.
 func (a Addr) Network() string {
     return portforward.PortForwardProtocolV1Name

@@ -49,11 +57,3 @@ func (a Addr) String() string {
         a.port,
     )
 }
-
-// NewAddrFromConn creates an Addr from the given connection.
-func NewAddrFromConn(c *Conn) Addr {
-    return Addr{
-        port:       c.stream.Headers().Get(corev1.PortHeader),
-        identifier: c.stream.Identifier(),
-    }
-}

pkg/proxy/conn.go

Lines changed: 9 additions & 9 deletions
@@ -32,6 +32,15 @@ type Conn struct {
     writeDeadline time.Time
 }
 
+// NewConn creates a new net/conn interface based on an underlying Kubernetes
+// API server proxy connection.
+func NewConn(connection httpstream.Connection, stream httpstream.Stream) *Conn {
+    return &Conn{
+        connection: connection,
+        stream:     stream,
+    }
+}
+
 // Read from the connection.
 func (c *Conn) Read(b []byte) (n int, err error) {
     return c.stream.Read(b)

@@ -78,12 +87,3 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
 
     return nil
 }
-
-// NewConn creates a new net/conn interface based on an underlying Kubernetes
-// API server proxy connection.
-func NewConn(connection httpstream.Connection, stream httpstream.Stream) *Conn {
-    return &Conn{
-        connection: connection,
-        stream:     stream,
-    }
-}
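
NewAddrFromConn and NewConn are unchanged constructors, relocated to sit directly under their types. Since Addr exposes the net.Addr methods (Network and String) and Conn's doc comment frames it as a net connection, a compile-time assertion is a common way to document that; a sketch, assuming both types fully satisfy the standard interfaces:

package proxy

import "net"

// Illustrative compile-time interface checks; not part of this commit.
var (
    _ net.Addr = Addr{}
    _ net.Conn = (*Conn)(nil)
)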

pkg/rke2/workload_cluster.go

Lines changed: 67 additions & 67 deletions
@@ -198,19 +198,6 @@ type ClusterStatus struct {
     HasRKE2ServingSecret bool
 }
 
-func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
-    nodes := &corev1.NodeList{}
-    labels := map[string]string{
-        labelNodeRoleControlPlane: "true",
-    }
-
-    if err := w.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
-        return nil, err
-    }
-
-    return nodes, nil
-}
-
 // PatchNodes patches the nodes in the workload cluster.
 func (w *Workload) PatchNodes(ctx context.Context, cp *ControlPlane) error {
     errList := []error{}

@@ -399,6 +386,60 @@ func (w *Workload) UpdateAgentConditions(controlPlane *ControlPlane) {
     })
 }
 
+// UpdateNodeMetadata is responsible for populating node metadata after
+// it is referenced from machine object.
+func (w *Workload) UpdateNodeMetadata(ctx context.Context, controlPlane *ControlPlane) error {
+    for nodeName, machine := range controlPlane.Machines {
+        if machine.Spec.Bootstrap.ConfigRef == nil {
+            continue
+        }
+
+        if machine.Status.NodeRef != nil {
+            nodeName = machine.Status.NodeRef.Name
+        }
+
+        conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
+
+        node, nodeFound := w.Nodes[nodeName]
+        if !nodeFound {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, "associated node not found")
+
+            continue
+        } else if name, ok := node.Annotations[clusterv1.MachineAnnotation]; !ok || name != machine.Name {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, fmt.Sprintf("node object is missing %s annotation", clusterv1.MachineAnnotation))
+
+            continue
+        }
+
+        rkeConfig, found := controlPlane.Rke2Configs[machine.Name]
+        if !found {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, "associated RKE2 config not found")
+
+            continue
+        }
+
+        annotations.AddAnnotations(node, rkeConfig.Spec.AgentConfig.NodeAnnotations)
+    }
+
+    return w.PatchNodes(ctx, controlPlane)
+}
+
+// UpdateEtcdConditions is responsible for updating machine conditions reflecting the status of all the etcd members.
+// This operation is best effort, in the sense that in case of problems in retrieving member status, it sets
+// the condition to Unknown state without returning any error.
+func (w *Workload) UpdateEtcdConditions(controlPlane *ControlPlane) {
+    w.updateManagedEtcdConditions(controlPlane)
+}
+
 type aggregateFromMachinesToRCPInput struct {
     controlPlane *ControlPlane
     machineConditions []clusterv1.ConditionType

@@ -409,6 +450,19 @@ type aggregateFromMachinesToRCPInput struct {
     note string
 }
 
+func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
+    nodes := &corev1.NodeList{}
+    labels := map[string]string{
+        labelNodeRoleControlPlane: "true",
+    }
+
+    if err := w.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
+        return nil, err
+    }
+
+    return nodes, nil
+}
+
 // aggregateFromMachinesToRCP aggregates a group of conditions from machines to RCP.
 // NOTE: this func follows the same aggregation rules used by conditions.Merge thus giving priority to
 // errors, then warning, info down to unknown.

@@ -510,13 +564,6 @@ func aggregateFromMachinesToRCP(input aggregateFromMachinesToRCPInput) {
     }
 }
 
-// UpdateEtcdConditions is responsible for updating machine conditions reflecting the status of all the etcd members.
-// This operation is best effort, in the sense that in case of problems in retrieving member status, it sets
-// the condition to Unknown state without returning any error.
-func (w *Workload) UpdateEtcdConditions(controlPlane *ControlPlane) {
-    w.updateManagedEtcdConditions(controlPlane)
-}
-
 func (w *Workload) updateManagedEtcdConditions(controlPlane *ControlPlane) {
     // NOTE: This methods uses control plane nodes only to get in contact with etcd but then it relies on etcd
     // as ultimate source of truth for the list of members and for their health.

@@ -552,50 +599,3 @@ func (w *Workload) updateManagedEtcdConditions(controlPlane *ControlPlane) {
         conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition)
     }
 }
-
-// UpdateNodeMetadata is responsible for populating node metadata after
-// it is referenced from machine object.
-func (w *Workload) UpdateNodeMetadata(ctx context.Context, controlPlane *ControlPlane) error {
-    for nodeName, machine := range controlPlane.Machines {
-        if machine.Spec.Bootstrap.ConfigRef == nil {
-            continue
-        }
-
-        if machine.Status.NodeRef != nil {
-            nodeName = machine.Status.NodeRef.Name
-        }
-
-        conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
-
-        node, nodeFound := w.Nodes[nodeName]
-        if !nodeFound {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, "associated node not found")
-
-            continue
-        } else if name, ok := node.Annotations[clusterv1.MachineAnnotation]; !ok || name != machine.Name {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, fmt.Sprintf("node object is missing %s annotation", clusterv1.MachineAnnotation))
-
-            continue
-        }
-
-        rkeConfig, found := controlPlane.Rke2Configs[machine.Name]
-        if !found {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, "associated RKE2 config not found")
-
-            continue
-        }
-
-        annotations.AddAnnotations(node, rkeConfig.Spec.AgentConfig.NodeAnnotations)
-    }
-
-    return w.PatchNodes(ctx, controlPlane)
-}
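
The moved UpdateNodeMetadata funnels each RKE2Config's NodeAnnotations onto its node through annotations.AddAnnotations, then persists the result with PatchNodes. A small standalone sketch of that merge step, assuming cluster-api's util/annotations helper (which reports whether it modified the object); the annotation key is hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/cluster-api/util/annotations"
)

func main() {
    node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "cp-0"}}

    // Merge desired annotations into the node's metadata, as UpdateNodeMetadata
    // does with rkeConfig.Spec.AgentConfig.NodeAnnotations.
    changed := annotations.AddAnnotations(node, map[string]string{
        "example.com/zone": "a", // hypothetical annotation
    })

    fmt.Println(changed, node.Annotations) // true map[example.com/zone:a]
}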
