diff --git a/internal/controllers/machinepool/machinepool_controller.go b/internal/controllers/machinepool/machinepool_controller.go
index 5eb6f6dc5386..b5a653e25dbf 100644
--- a/internal/controllers/machinepool/machinepool_controller.go
+++ b/internal/controllers/machinepool/machinepool_controller.go
@@ -202,6 +202,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		}},
 		patch.WithOwnedConditions{Conditions: []string{
 			clusterv1.PausedCondition,
+			clusterv1.MachinesUpToDateCondition,
 		}},
 	}
 	if reterr == nil {
diff --git a/internal/controllers/machinepool/machinepool_controller_status.go b/internal/controllers/machinepool/machinepool_controller_status.go
index 24d23c15272c..99a287b5cc81 100644
--- a/internal/controllers/machinepool/machinepool_controller_status.go
+++ b/internal/controllers/machinepool/machinepool_controller_status.go
@@ -19,7 +19,9 @@ package machinepool
 import (
 	"context"
 	"fmt"
+	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -41,7 +43,8 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) error {
 
 	setReplicas(s.machinePool, hasMachinePoolMachines, s.machines)
 
-	// TODO: in future add setting conditions here
+	// Set the MachinesUpToDate condition to surface when Machines are not up-to-date with the spec.
+	setMachinesUpToDateCondition(ctx, s.machinePool, s.machines, hasMachinePoolMachines)
 
 	return nil
 }
@@ -73,3 +76,78 @@ func setReplicas(mp *clusterv1.MachinePool, hasMachinePoolMachines bool, machine
 	mp.Status.AvailableReplicas = ptr.To(availableReplicas)
 	mp.Status.UpToDateReplicas = ptr.To(upToDateReplicas)
 }
+
+// setMachinesUpToDateCondition sets the MachinesUpToDate condition on the MachinePool.
+func setMachinesUpToDateCondition(ctx context.Context, mp *clusterv1.MachinePool, machines []*clusterv1.Machine, hasMachinePoolMachines bool) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// For MachinePool providers that don't use Machine objects (like managed pools),
+	// we derive the MachinesUpToDate condition from the infrastructure status.
+	// If the infrastructure is not yet provisioned, machines are not up-to-date.
+	if !hasMachinePoolMachines {
+		// Check whether the infrastructure is ready by looking at the initialization status.
+		if !ptr.Deref(mp.Status.Initialization.InfrastructureProvisioned, false) {
+			log.V(4).Info("Setting MachinesUpToDate to false for machineless MachinePool: infrastructure not yet provisioned")
+			conditions.Set(mp, metav1.Condition{
+				Type:    clusterv1.MachinesUpToDateCondition,
+				Status:  metav1.ConditionFalse,
+				Reason:  clusterv1.NotUpToDateReason,
+				Message: "Infrastructure is not yet provisioned",
+			})
+			return
+		}
+
+		conditions.Set(mp, metav1.Condition{
+			Type:   clusterv1.MachinesUpToDateCondition,
+			Status: metav1.ConditionTrue,
+			Reason: clusterv1.UpToDateReason,
+		})
+		return
+	}
+
+	// Only consider Machines that have an UpToDate condition or are older than 10s.
+	// This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created,
+	// because it can take a bit until the UpToDate condition is set on a new Machine.
+	filteredMachines := make([]*clusterv1.Machine, 0, len(machines))
+	for _, machine := range machines {
+		if conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second {
+			filteredMachines = append(filteredMachines, machine)
+		}
+	}
+
+	if len(filteredMachines) == 0 {
+		conditions.Set(mp, metav1.Condition{
+			Type:   clusterv1.MachinesUpToDateCondition,
+			Status: metav1.ConditionTrue,
+			Reason: clusterv1.NoReplicasReason,
+		})
+		return
+	}
+
+	upToDateCondition, err := conditions.NewAggregateCondition(
+		filteredMachines, clusterv1.MachineUpToDateCondition,
+		conditions.TargetConditionType(clusterv1.MachinesUpToDateCondition),
+		// Using a custom merge strategy to override reasons applied during merge.
+		conditions.CustomMergeStrategy{
+			MergeStrategy: conditions.DefaultMergeStrategy(
+				conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
+					clusterv1.NotUpToDateReason,
+					clusterv1.UpToDateUnknownReason,
+					clusterv1.UpToDateReason,
+				)),
+			),
+		},
+	)
+	if err != nil {
+		log.Error(err, "Failed to aggregate Machines' UpToDate conditions")
+		conditions.Set(mp, metav1.Condition{
+			Type:    clusterv1.MachinesUpToDateCondition,
+			Status:  metav1.ConditionUnknown,
+			Reason:  clusterv1.InternalErrorReason,
+			Message: "Please check controller logs for errors",
+		})
+		return
+	}
+
+	conditions.Set(mp, *upToDateCondition)
+}
diff --git a/internal/controllers/machinepool/machinepool_controller_test.go b/internal/controllers/machinepool/machinepool_controller_test.go
index 851beadd5cd5..5542c0689818 100644
--- a/internal/controllers/machinepool/machinepool_controller_test.go
+++ b/internal/controllers/machinepool/machinepool_controller_test.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
@@ -45,6 +46,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	externalfake "sigs.k8s.io/cluster-api/controllers/external/fake"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
 	"sigs.k8s.io/cluster-api/util/test/builder"
 )
@@ -1157,6 +1159,57 @@ func TestMachinePoolConditions(t *testing.T) {
 				g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse))
 			},
 		},
+		{
+			name:                "MachinesUpToDate condition set when infrastructure ready",
+			dataSecretCreated:   true,
+			infrastructureReady: true,
+			beforeFunc: func(_, _ *unstructured.Unstructured, mp *clusterv1.MachinePool, _ *corev1.NodeList) {
+				mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
+				mp.Status.NodeRefs = []corev1.ObjectReference{
+					{Name: "node-1"},
+					{Name: "azure-node-4"},
+				}
+				mp.Status.Replicas = ptr.To(int32(2))
+			},
+			conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) {
+				t.Helper()
+				g := NewWithT(t)
+
+				// Check that the MachinesUpToDate condition is set (v1beta2 condition).
+				mp, ok := getter.(*clusterv1.MachinePool)
+				g.Expect(ok).To(BeTrue())
+				machinesUpToDateCondition := conditions.Get(mp, clusterv1.MachinesUpToDateCondition)
+				g.Expect(machinesUpToDateCondition).ToNot(BeNil())
+				g.Expect(machinesUpToDateCondition.Status).To(Equal(metav1.ConditionTrue))
+				g.Expect(machinesUpToDateCondition.Reason).To(Equal(clusterv1.UpToDateReason))
+			},
+		},
+		{
+			name:                "MachinesUpToDate condition is false when infrastructure not ready",
+			dataSecretCreated:   true,
+			infrastructureReady: false,
+			beforeFunc: func(_, _ *unstructured.Unstructured, mp *clusterv1.MachinePool, _ *corev1.NodeList) {
+				mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
+				mp.Status.NodeRefs = []corev1.ObjectReference{
+					{Name: "node-1"},
+					{Name: "azure-node-4"},
+				}
+				mp.Status.Replicas = ptr.To(int32(2))
+			},
+			conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) {
+				t.Helper()
+				g := NewWithT(t)
+
+				// Check that the MachinesUpToDate condition is False when infrastructure is not ready.
+				mp, ok := getter.(*clusterv1.MachinePool)
+				g.Expect(ok).To(BeTrue())
+				machinesUpToDateCondition := conditions.Get(mp, clusterv1.MachinesUpToDateCondition)
+				g.Expect(machinesUpToDateCondition).ToNot(BeNil())
+				g.Expect(machinesUpToDateCondition.Status).To(Equal(metav1.ConditionFalse))
+				g.Expect(machinesUpToDateCondition.Reason).To(Equal(clusterv1.NotUpToDateReason))
+				g.Expect(machinesUpToDateCondition.Message).To(Equal("Infrastructure is not yet provisioned"))
+			},
+		},
 	}
 
 	for _, tt := range testcases {
@@ -1187,6 +1240,7 @@ func TestMachinePoolConditions(t *testing.T) {
 			Client:       clientFake,
 			APIReader:    clientFake,
 			ClusterCache: clustercache.NewFakeClusterCache(clientFake, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
+			recorder:     record.NewFakeRecorder(32),
 			externalTracker: external.ObjectTracker{
 				Controller: externalfake.Controller{},
 				Cache:      &informertest.FakeInformers{},
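
Note on the 10-second filter in setMachinesUpToDateCondition: a freshly created Machine may not have its UpToDate condition set yet, so aggregating over it immediately would flip the MachinePool condition back and forth. Below is a minimal, self-contained sketch of that rule; machineInfo and filterForAggregation are hypothetical stand-ins for clusterv1.Machine and the inline loop in the patch, not CAPI APIs.

```go
package main

import (
	"fmt"
	"time"
)

// machineInfo is a hypothetical stand-in for clusterv1.Machine, carrying only
// the two inputs the filter looks at: whether the UpToDate condition exists
// yet, and when the Machine was created.
type machineInfo struct {
	name         string
	hasUpToDate  bool      // stands in for conditions.Has(machine, clusterv1.MachineUpToDateCondition)
	creationTime time.Time // stands in for machine.CreationTimestamp.Time
}

// filterForAggregation mirrors the anti-flicker rule from the patch: keep a
// Machine if its UpToDate condition is already set, or if it is older than
// 10s (by then the Machine controller has had a chance to set it).
func filterForAggregation(machines []machineInfo, now time.Time) []machineInfo {
	filtered := make([]machineInfo, 0, len(machines))
	for _, m := range machines {
		if m.hasUpToDate || now.Sub(m.creationTime) > 10*time.Second {
			filtered = append(filtered, m)
		}
	}
	return filtered
}

func main() {
	now := time.Now()
	machines := []machineInfo{
		{name: "m-old-no-condition", creationTime: now.Add(-30 * time.Second)}, // kept: older than 10s
		{name: "m-new-no-condition", creationTime: now.Add(-2 * time.Second)},  // skipped: too new, condition not set yet
		{name: "m-new-with-condition", hasUpToDate: true, creationTime: now},   // kept: condition already set
	}
	for _, m := range filterForAggregation(machines, now) {
		fmt.Println("aggregating:", m.name)
	}
}
```

If every Machine is skipped by this filter, the patch deliberately reports True with NoReplicasReason rather than guessing, and the condition converges on the next resync once the per-Machine conditions exist.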
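Note on the CustomMergeStrategy passed to conditions.NewAggregateCondition: it only overrides how the aggregated reason is computed; status precedence still comes from conditions.DefaultMergeStrategy. The sketch below shows the reason mapping this should produce, assuming the usual precedence where any False source condition wins over Unknown, which wins over True; the reason string values are illustrative stand-ins for the clusterv1 constants, not copied from the CAPI source.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Illustrative stand-ins for clusterv1.NotUpToDateReason,
// clusterv1.UpToDateUnknownReason, and clusterv1.UpToDateReason.
const (
	notUpToDateReason     = "NotUpToDate"
	upToDateUnknownReason = "UpToDateUnknown"
	upToDateReason        = "UpToDate"
)

// mergedReason sketches the three-way reason choice the patch wires in via
// GetDefaultComputeMergeReasonFunc, assuming False beats Unknown beats True.
func mergedReason(statuses []metav1.ConditionStatus) string {
	anyUnknown := false
	for _, s := range statuses {
		switch s {
		case metav1.ConditionFalse:
			return notUpToDateReason // any not-up-to-date Machine flips the aggregate
		case metav1.ConditionUnknown:
			anyUnknown = true
		}
	}
	if anyUnknown {
		return upToDateUnknownReason
	}
	return upToDateReason
}

func main() {
	fmt.Println(mergedReason([]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}))    // UpToDate
	fmt.Println(mergedReason([]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionUnknown})) // UpToDateUnknown
	fmt.Println(mergedReason([]metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue}))   // NotUpToDate
}
```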