1 change: 1 addition & 0 deletions internal/controllers/machinepool/machinepool_controller.go
@@ -202,6 +202,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
}},
patch.WithOwnedConditions{Conditions: []string{
clusterv1.PausedCondition,
clusterv1.MachinesUpToDateCondition,
}},
}
if reterr == nil {
@@ -19,7 +19,9 @@ package machinepool
import (
"context"
"fmt"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"

@@ -41,7 +43,8 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) error {

setReplicas(s.machinePool, hasMachinePoolMachines, s.machines)

// TODO: in future add setting conditions here
// Set MachinesUpToDate condition to surface when machines are not up-to-date with the spec.
setMachinesUpToDateCondition(ctx, s.machinePool, s.machines, hasMachinePoolMachines)

return nil
}
@@ -73,3 +76,78 @@ func setReplicas(mp *clusterv1.MachinePool, hasMachinePoolMachines bool, machine
mp.Status.AvailableReplicas = ptr.To(availableReplicas)
mp.Status.UpToDateReplicas = ptr.To(upToDateReplicas)
}

// setMachinesUpToDateCondition sets the MachinesUpToDate condition on the MachinePool.
func setMachinesUpToDateCondition(ctx context.Context, mp *clusterv1.MachinePool, machines []*clusterv1.Machine, hasMachinePoolMachines bool) {
log := ctrl.LoggerFrom(ctx)

// For MachinePool providers that don't use Machine objects (like managed pools),
// we derive the MachinesUpToDate condition from the infrastructure status.
// If InfrastructureReady is False, machines are not up-to-date.
if !hasMachinePoolMachines {
// Check if infrastructure is ready by looking at the initialization status
if !ptr.Deref(mp.Status.Initialization.InfrastructureProvisioned, false) {
log.V(4).Info("setting MachinesUpToDate to false for machineless MachinePool: infrastructure not yet provisioned")
conditions.Set(mp, metav1.Condition{
Member: I wonder if it's also worth adding a message stating that this pool is "machineless" and the infrastructure is not ready, just to give more context.

Author: That's a user-facing condition, so isn't that a provider-specific implementation detail? It might be useful for devs debugging the controller itself, but they can go to the logs, as opposed to users going to the infrastructure resource when they see the "infrastructure not provisioned" message.

Member: In that case, adding a log with a high verbosity level may help with the debugging.

Author: Sounds good. Added the log!

Type: clusterv1.MachinesUpToDateCondition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotUpToDateReason,
Contributor: Instead of this, I would suggest using a MachinePool-specific constant. I already see MachineDeploymentMachinesNotUpToDateReason and other existing ones. If something is not there, you can add a new constant to keep it consistent with the others.

Author: aha! Nice catch! But it looks like that's going to be separate work, as per https://github.com/kubernetes-sigs/cluster-api/blob/main/api/core/v1beta2/machinepool_types.go#L32-L65

Message: "Infrastructure is not yet provisioned",
})
return
}

conditions.Set(mp, metav1.Condition{
Type: clusterv1.MachinesUpToDateCondition,
Status: metav1.ConditionTrue,
Reason: clusterv1.UpToDateReason,
})
return
}

// Only consider Machines that have an UpToDate condition or are older than 10s.
// This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created,
// because it can take a bit until the UpToDate condition is set on a new Machine.
filteredMachines := make([]*clusterv1.Machine, 0, len(machines))
for _, machine := range machines {
if conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second {
filteredMachines = append(filteredMachines, machine)
}
}

if len(filteredMachines) == 0 {
conditions.Set(mp, metav1.Condition{
Type: clusterv1.MachinesUpToDateCondition,
Status: metav1.ConditionTrue,
Reason: clusterv1.NoReplicasReason,
})
return
}

upToDateCondition, err := conditions.NewAggregateCondition(
filteredMachines, clusterv1.MachineUpToDateCondition,
conditions.TargetConditionType(clusterv1.MachinesUpToDateCondition),
// Using a custom merge strategy to override reasons applied during merge.
conditions.CustomMergeStrategy{
MergeStrategy: conditions.DefaultMergeStrategy(
conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
clusterv1.NotUpToDateReason,
clusterv1.UpToDateUnknownReason,
clusterv1.UpToDateReason,
)),
),
},
)
if err != nil {
log.Error(err, "Failed to aggregate Machine's UpToDate conditions")
conditions.Set(mp, metav1.Condition{
Type: clusterv1.MachinesUpToDateCondition,
Status: metav1.ConditionUnknown,
Reason: clusterv1.InternalErrorReason,
Message: "Please check controller logs for errors",
})
return
}

conditions.Set(mp, *upToDateCondition)
}
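
Side note on the review thread above about MachinePool-specific reasons: a minimal sketch, not part of this diff, of what such constants could look like in machinepool_types.go, mirroring the existing MachineDeployment constants. All MachinePool* names below are assumptions for illustration; only the generic reason constants they alias are used by the code in this PR.

// Hypothetical additions to api/core/v1beta2/machinepool_types.go (illustrative only, not part of this PR).
const (
	// MachinePoolMachinesUpToDateCondition surfaces details of the controlled machines being up to date, if any.
	MachinePoolMachinesUpToDateCondition = MachinesUpToDateCondition

	// MachinePoolMachinesUpToDateReason surfaces when all the machines controlled by the MachinePool are up to date.
	MachinePoolMachinesUpToDateReason = UpToDateReason

	// MachinePoolMachinesNotUpToDateReason surfaces when at least one of the machines controlled by the MachinePool is not up to date.
	MachinePoolMachinesNotUpToDateReason = NotUpToDateReason

	// MachinePoolMachinesUpToDateUnknownReason surfaces when the up-to-date state of at least one of the machines controlled by the MachinePool is unknown.
	MachinePoolMachinesUpToDateUnknownReason = UpToDateUnknownReason
)

If constants like these were added, setMachinesUpToDateCondition could reference them instead of the generic NotUpToDateReason/UpToDateReason/UpToDateUnknownReason values, keeping the reasons consistent with the MachineDeployment naming.
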
54 changes: 54 additions & 0 deletions internal/controllers/machinepool/machinepool_controller_test.go
@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
@@ -45,6 +46,7 @@ import (
"sigs.k8s.io/cluster-api/controllers/external"
externalfake "sigs.k8s.io/cluster-api/controllers/external/fake"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
"sigs.k8s.io/cluster-api/util/test/builder"
)
@@ -1157,6 +1159,57 @@ func TestMachinePoolConditions(t *testing.T) {
g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse))
},
},
{
name: "MachinesUpToDate condition set when infrastructure ready",
dataSecretCreated: true,
infrastructureReady: true,
beforeFunc: func(_, _ *unstructured.Unstructured, mp *clusterv1.MachinePool, _ *corev1.NodeList) {
mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
mp.Status.NodeRefs = []corev1.ObjectReference{
{Name: "node-1"},
{Name: "azure-node-4"},
}
mp.Status.Replicas = ptr.To(int32(2))
},
conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) {
t.Helper()
g := NewWithT(t)

// Check that MachinesUpToDate condition is set (v1beta2 condition)
mp, ok := getter.(*clusterv1.MachinePool)
g.Expect(ok).To(BeTrue())
machinesUpToDateCondition := conditions.Get(mp, clusterv1.MachinesUpToDateCondition)
g.Expect(machinesUpToDateCondition).ToNot(BeNil())
g.Expect(machinesUpToDateCondition.Status).To(Equal(metav1.ConditionTrue))
g.Expect(machinesUpToDateCondition.Reason).To(Equal(clusterv1.UpToDateReason))
},
},
{
name: "MachinesUpToDate condition is false when infrastructure not ready",
dataSecretCreated: true,
infrastructureReady: false,
beforeFunc: func(_, _ *unstructured.Unstructured, mp *clusterv1.MachinePool, _ *corev1.NodeList) {
mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
mp.Status.NodeRefs = []corev1.ObjectReference{
{Name: "node-1"},
{Name: "azure-node-4"},
}
mp.Status.Replicas = ptr.To(int32(2))
},
conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) {
t.Helper()
g := NewWithT(t)

// Check that MachinesUpToDate condition is False when infrastructure is not ready
mp, ok := getter.(*clusterv1.MachinePool)
g.Expect(ok).To(BeTrue())
machinesUpToDateCondition := conditions.Get(mp, clusterv1.MachinesUpToDateCondition)
g.Expect(machinesUpToDateCondition).ToNot(BeNil())
g.Expect(machinesUpToDateCondition.Status).To(Equal(metav1.ConditionFalse))
g.Expect(machinesUpToDateCondition.Reason).To(Equal(clusterv1.NotUpToDateReason))
g.Expect(machinesUpToDateCondition.Message).To(Equal("Infrastructure is not yet provisioned"))
},
},
}

for _, tt := range testcases {
@@ -1187,6 +1240,7 @@ func TestMachinePoolConditions(t *testing.T) {
Client: clientFake,
APIReader: clientFake,
ClusterCache: clustercache.NewFakeClusterCache(clientFake, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
recorder: record.NewFakeRecorder(32),
externalTracker: external.ObjectTracker{
Controller: externalfake.Controller{},
Cache: &informertest.FakeInformers{},