Commit 39c5c8b

Implement mitigation for managedFields issue with SSA in apiserver
Signed-off-by: Stefan Büringer buringerst@vmware.com
1 parent b17e1c4 commit 39c5c8b

19 files changed: +1847 additions, -135 deletions


controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 76 additions & 50 deletions
@@ -444,14 +444,18 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, controlPl
         return result, err
     }

-    if err := r.syncMachines(ctx, controlPlane); err != nil {
+    stopReconcile, err := r.syncMachines(ctx, controlPlane)
+    if err != nil {
         // Note: If any of the calls got a NotFound error, it means that at least one Machine got deleted.
         // Let's return here so that the next Reconcile will get the updated list of Machines.
         if apierrors.IsNotFound(err) {
             return ctrl.Result{}, nil // Note: Requeue is not needed, changes to Machines trigger another reconcile.
         }
         return ctrl.Result{}, errors.Wrap(err, "failed to sync Machines")
     }
+    if stopReconcile {
+        return ctrl.Result{RequeueAfter: 1 * time.Second}, nil // Explicitly requeue as we are not watching all objects.
+    }

     // Aggregate the operational state of all the machines; while aggregating we are adding the
     // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
@@ -845,16 +849,17 @@ func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.C
 }

 // syncMachines updates Machines, InfrastructureMachines and KubeadmConfigs to propagate in-place mutable fields from KCP.
-func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane *internal.ControlPlane) error {
+func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, controlPlane *internal.ControlPlane) (bool, error) {
     patchHelpers := map[string]*patch.Helper{}
+    var anyManagedFieldIssueMitigated bool
     for machineName := range controlPlane.Machines {
         m := controlPlane.Machines[machineName]
         // If the Machine is already being deleted, we only need to sync
         // the subset of fields that impact tearing down the Machine.
         if !m.DeletionTimestamp.IsZero() {
             patchHelper, err := patch.NewHelper(m, r.Client)
             if err != nil {
-                return err
+                return true, err
             }

             // Set all other in-place mutable fields that impact the ability to tear down existing machines.
@@ -865,84 +870,105 @@ func (r *KubeadmControlPlaneReconciler) syncMachines(ctx context.Context, contro

             // Note: We intentionally don't set "minReadySeconds" on Machines because we consider it enough to have machine availability driven by readiness of control plane components.
             if err := patchHelper.Patch(ctx, m); err != nil {
-                return err
+                return true, err
             }

             controlPlane.Machines[machineName] = m
             patchHelper, err = patch.NewHelper(m, r.Client)
             if err != nil {
-                return err
+                return true, err
             }
             patchHelpers[machineName] = patchHelper
             continue
         }

-        // Update Machine to propagate in-place mutable fields from KCP.
-        updatedMachine, err := r.updateMachine(ctx, m, controlPlane.KCP, controlPlane.Cluster)
-        if err != nil {
-            return errors.Wrapf(err, "failed to update Machine: %s", klog.KObj(m))
-        }
-        // Note: Ensure ControlPlane has the latest version of the Machine. This is required because
-        // e.g. the in-place update code that is called later has to use the latest version of the Machine.
-        controlPlane.Machines[machineName] = updatedMachine
-        if _, ok := controlPlane.MachinesNotUpToDate[machineName]; ok {
-            controlPlane.MachinesNotUpToDate[machineName] = updatedMachine
-        }
-        // Since the machine is updated, re-create the patch helper so that any subsequent
-        // Patch calls use the correct base machine object to calculate the diffs.
-        // Example: reconcileControlPlaneAndMachinesConditions patches the machine objects in a subsequent call
-        // and, it should use the updated machine to calculate the diff.
-        // Note: If the patchHelpers are not re-computed based on the new updated machines, subsequent
-        // Patch calls will fail because the patch will be calculated based on an outdated machine and will error
-        // because of outdated resourceVersion.
-        // TODO: This should be cleaned-up to have a more streamline way of constructing and using patchHelpers.
-        patchHelper, err := patch.NewHelper(updatedMachine, r.Client)
+        managedFieldIssueMitigated, err := ssa.MitigateManagedFieldsIssue(ctx, r.Client, m, kcpManagerName)
         if err != nil {
-            return err
+            return true, err
+        }
+        anyManagedFieldIssueMitigated = anyManagedFieldIssueMitigated || managedFieldIssueMitigated
+        if !anyManagedFieldIssueMitigated {
+            // Update Machine to propagate in-place mutable fields from KCP.
+            updatedMachine, err := r.updateMachine(ctx, m, controlPlane.KCP, controlPlane.Cluster)
+            if err != nil {
+                return true, errors.Wrapf(err, "failed to update Machine: %s", klog.KObj(m))
+            }
+            // Note: Ensure ControlPlane has the latest version of the Machine. This is required because
+            // e.g. the in-place update code that is called later has to use the latest version of the Machine.
+            controlPlane.Machines[machineName] = updatedMachine
+            if _, ok := controlPlane.MachinesNotUpToDate[machineName]; ok {
+                controlPlane.MachinesNotUpToDate[machineName] = updatedMachine
+            }
+            // Since the machine is updated, re-create the patch helper so that any subsequent
+            // Patch calls use the correct base machine object to calculate the diffs.
+            // Example: reconcileControlPlaneAndMachinesConditions patches the machine objects in a subsequent call
+            // and, it should use the updated machine to calculate the diff.
+            // Note: If the patchHelpers are not re-computed based on the new updated machines, subsequent
+            // Patch calls will fail because the patch will be calculated based on an outdated machine and will error
+            // because of outdated resourceVersion.
+            // TODO: This should be cleaned-up to have a more streamline way of constructing and using patchHelpers.
+            patchHelper, err := patch.NewHelper(updatedMachine, r.Client)
+            if err != nil {
+                return true, err
+            }
+            patchHelpers[machineName] = patchHelper
         }
-        patchHelpers[machineName] = patchHelper

         infraMachine, infraMachineFound := controlPlane.InfraResources[machineName]
         // Only update the InfraMachine if it is already found, otherwise just skip it.
         // This could happen e.g. if the cache is not up-to-date yet.
         if infraMachineFound {
-            // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11.
-            // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own
-            // labels and annotations and capi-kubeadmcontrolplane everything else.
-            // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once.
-            // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can
-            // be removed with CAPI v1.15.
-            if err := ssa.MigrateManagedFields(ctx, r.Client, infraMachine, kcpManagerName, kcpMetadataManagerName); err != nil {
-                return errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine))
+            managedFieldIssueMitigated, err = ssa.MitigateManagedFieldsIssue(ctx, r.Client, infraMachine, kcpMetadataManagerName)
+            if err != nil {
+                return true, err
             }
-            // Update in-place mutating fields on InfrastructureMachine.
-            if err := r.updateLabelsAndAnnotations(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil {
-                return errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine))
+            anyManagedFieldIssueMitigated = anyManagedFieldIssueMitigated || managedFieldIssueMitigated
+            if !anyManagedFieldIssueMitigated {
+                // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11.
+                // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own
+                // labels and annotations and capi-kubeadmcontrolplane everything else.
+                // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once.
+                // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can
+                // be removed with CAPI v1.15.
+                if err := ssa.MigrateManagedFields(ctx, r.Client, infraMachine, kcpManagerName, kcpMetadataManagerName); err != nil {
+                    return true, errors.Wrapf(err, "failed to clean up managedFields of InfrastructureMachine %s", klog.KObj(infraMachine))
+                }
+                // Update in-place mutating fields on InfrastructureMachine.
+                if err := r.updateLabelsAndAnnotations(ctx, infraMachine, infraMachine.GroupVersionKind(), controlPlane.KCP, controlPlane.Cluster); err != nil {
+                    return true, errors.Wrapf(err, "failed to update InfrastructureMachine %s", klog.KObj(infraMachine))
+                }
             }
         }

         kubeadmConfig, kubeadmConfigFound := controlPlane.KubeadmConfigs[machineName]
         // Only update the KubeadmConfig if it is already found, otherwise just skip it.
         // This could happen e.g. if the cache is not up-to-date yet.
         if kubeadmConfigFound {
-            // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11.
-            // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own
-            // labels and annotations and capi-kubeadmcontrolplane everything else.
-            // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once.
-            // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can
-            // be removed with CAPI v1.15.
-            if err := ssa.MigrateManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, kcpMetadataManagerName); err != nil {
-                return errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig))
+            managedFieldIssueMitigated, err = ssa.MitigateManagedFieldsIssue(ctx, r.Client, kubeadmConfig, kcpMetadataManagerName)
+            if err != nil {
+                return true, err
             }
-            // Update in-place mutating fields on BootstrapConfig.
-            if err := r.updateLabelsAndAnnotations(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil {
-                return errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig))
+            anyManagedFieldIssueMitigated = anyManagedFieldIssueMitigated || managedFieldIssueMitigated
+            if !anyManagedFieldIssueMitigated {
+                // Drop managedFields for manager:Update and capi-kubeadmcontrolplane:Apply for all objects created with CAPI <= v1.11.
+                // Starting with CAPI v1.12 we have a new managedField structure where capi-kubeadmcontrolplane-metadata will own
+                // labels and annotations and capi-kubeadmcontrolplane everything else.
+                // Note: We have to call ssa.MigrateManagedFields for every Machine created with CAPI <= v1.11 once.
+                // Given that this was introduced in CAPI v1.12 and our n-3 upgrade policy this can
+                // be removed with CAPI v1.15.
+                if err := ssa.MigrateManagedFields(ctx, r.Client, kubeadmConfig, kcpManagerName, kcpMetadataManagerName); err != nil {
+                    return true, errors.Wrapf(err, "failed to clean up managedFields of KubeadmConfig %s", klog.KObj(kubeadmConfig))
+                }
+                // Update in-place mutating fields on BootstrapConfig.
+                if err := r.updateLabelsAndAnnotations(ctx, kubeadmConfig, bootstrapv1.GroupVersion.WithKind("KubeadmConfig"), controlPlane.KCP, controlPlane.Cluster); err != nil {
+                    return true, errors.Wrapf(err, "failed to update KubeadmConfig %s", klog.KObj(kubeadmConfig))
+                }
             }
         }
     }
     // Update the patch helpers.
     controlPlane.SetPatchHelpers(patchHelpers)
-    return nil
+    return anyManagedFieldIssueMitigated, nil
 }

 // reconcileControlPlaneAndMachinesConditions is responsible of reconciling conditions reporting the status of static pods and
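Note for readers of this page: the implementation of ssa.MitigateManagedFieldsIssue lives in another file of this commit and is not shown here. What follows is only a hypothetical sketch of what such a helper could look like, inferred from the call sites above and from the managedFields that the updated helpers_test.go (further down) expects after mitigation, i.e. a single Apply entry owned by the manager that performs the next SSA Apply, containing {"f:metadata":{"f:name":{}}}. The real helper may detect and repair the issue differently.

// Hypothetical sketch only, NOT the code shipped in this commit.
package ssa

import (
    "context"

    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// MitigateManagedFieldsIssue returns true if it had to mitigate the object, which signals the
// caller to stop the current reconcile and requeue (see the reconcile hunk above).
func MitigateManagedFieldsIssue(ctx context.Context, c client.Client, obj client.Object, manager string) (bool, error) {
    // Assumption: an object hit by the apiserver issue ends up with no managedFields entries
    // (this is also the state the updated helpers_test.go creates via a JSON patch).
    if len(obj.GetManagedFields()) > 0 {
        return false, nil
    }

    gvk, err := apiutil.GVKForObject(obj, c.Scheme())
    if err != nil {
        return false, err
    }

    // Apply a minimal object (identity fields only) with the field manager that will perform the
    // next SSA Apply. This seeds one managedFields entry with operation Apply owning
    // {"f:metadata":{"f:name":{}}}, matching the expectations asserted in helpers_test.go.
    minimal := &unstructured.Unstructured{}
    minimal.SetGroupVersionKind(gvk)
    minimal.SetNamespace(obj.GetNamespace())
    minimal.SetName(obj.GetName())
    if err := c.Patch(ctx, minimal, client.Apply, client.FieldOwner(manager), client.ForceOwnership); err != nil {
        return false, errors.Wrapf(err, "failed to mitigate managedFields of %s", klog.KObj(obj))
    }
    return true, nil
}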

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 6 additions & 2 deletions
@@ -2069,7 +2069,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
         SecretCachingClient: secretCachingClient,
         ssaCache: ssa.NewCache("test-controller"),
     }
-    g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
+    stopReconcile, err := reconciler.syncMachines(ctx, controlPlane)
+    g.Expect(err).ToNot(HaveOccurred())
+    g.Expect(stopReconcile).To(BeFalse())

     updatedInPlaceMutatingMachine := inPlaceMutatingMachine.DeepCopy()
     g.Eventually(func(g Gomega) {
@@ -2145,7 +2147,9 @@
     kcp.Spec.MachineTemplate.Spec.Deletion.NodeDeletionTimeoutSeconds = duration10s
     kcp.Spec.MachineTemplate.Spec.Deletion.NodeVolumeDetachTimeoutSeconds = duration10s
     controlPlane.KCP = kcp
-    g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
+    stopReconcile, err = reconciler.syncMachines(ctx, controlPlane)
+    g.Expect(err).ToNot(HaveOccurred())
+    g.Expect(stopReconcile).To(BeFalse())

     // Verify in-place mutable fields are updated on the Machine.
     updatedInPlaceMutatingMachine = inPlaceMutatingMachine.DeepCopy()

controlplane/kubeadm/internal/controllers/helpers_test.go

Lines changed: 51 additions & 1 deletion
@@ -18,6 +18,7 @@ package controllers

 import (
     "context"
+    "encoding/json"
     "strings"
     "testing"

@@ -28,6 +29,7 @@
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/tools/record"
     "k8s.io/utils/ptr"
     ctrl "sigs.k8s.io/controller-runtime"
@@ -457,7 +459,9 @@ func TestCloneConfigsAndGenerateMachineAndSyncMachines(t *testing.T) {

     controlPlane, err := internal.NewControlPlane(ctx, r.managementCluster, r.Client, cluster, kcp, collections.FromMachines(&m))
     g.Expect(err).ToNot(HaveOccurred())
-    g.Expect(r.syncMachines(ctx, controlPlane)).To(Succeed())
+    stopReconcile, err := r.syncMachines(ctx, controlPlane)
+    g.Expect(err).ToNot(HaveOccurred())
+    g.Expect(stopReconcile).To(BeFalse())

     // Verify managedFields again.
     infraObj, err = external.GetObjectFromContractVersionedRef(ctx, env.GetAPIReader(), m.Spec.InfrastructureRef, m.Namespace)
@@ -529,6 +533,52 @@
         }
     }}`,
     }})))
+
+    // Purge managedFields from objects.
+    jsonPatch := []map[string]interface{}{
+        {
+            "op": "replace",
+            "path": "/metadata/managedFields",
+            "value": []metav1.ManagedFieldsEntry{{}},
+        },
+    }
+    patch, err := json.Marshal(jsonPatch)
+    g.Expect(err).ToNot(HaveOccurred())
+    for _, object := range []client.Object{&m, infraObj, kubeadmConfig} {
+        g.Expect(env.Client.Patch(ctx, object, client.RawPatch(types.JSONPatchType, patch))).To(Succeed())
+        g.Expect(object.GetManagedFields()).To(BeEmpty())
+    }
+
+    // syncMachines to run mitigation code.
+    controlPlane.Machines[m.Name] = &m
+    controlPlane.InfraResources[infraObj.GetName()] = infraObj
+    controlPlane.KubeadmConfigs[kubeadmConfig.Name] = kubeadmConfig
+    stopReconcile, err = r.syncMachines(ctx, controlPlane)
+    g.Expect(err).ToNot(HaveOccurred())
+    g.Expect(stopReconcile).To(BeTrue())
+
+    // verify mitigation worked
+    g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(&m), &m)).To(Succeed())
+    g.Expect(cleanupTime(m.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{
+        APIVersion: clusterv1.GroupVersion.String(),
+        Manager: kcpManagerName, // matches manager of next Apply.
+        Operation: metav1.ManagedFieldsOperationApply,
+        FieldsV1: `{"f:metadata":{"f:name":{}}}`,
+    }})))
+    g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(infraObj), infraObj)).To(Succeed())
+    g.Expect(cleanupTime(infraObj.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{
+        APIVersion: infraObj.GetAPIVersion(),
+        Manager: kcpMetadataManagerName, // matches manager of next Apply.
+        Operation: metav1.ManagedFieldsOperationApply,
+        FieldsV1: `{"f:metadata":{"f:name":{}}}`,
+    }})))
+    g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(kubeadmConfig), kubeadmConfig)).To(Succeed())
+    g.Expect(cleanupTime(kubeadmConfig.GetManagedFields())).To(ConsistOf(toManagedFields([]managedFieldEntry{{
+        APIVersion: bootstrapv1.GroupVersion.String(),
+        Manager: kcpMetadataManagerName, // matches manager of next Apply.
+        Operation: metav1.ManagedFieldsOperationApply,
+        FieldsV1: `{"f:metadata":{"f:name":{}}}`,
+    }})))
 }

 func TestCloneConfigsAndGenerateMachineFailInfraMachineCreation(t *testing.T) {

controlplane/kubeadm/internal/controllers/scale.go

Lines changed: 2 additions & 2 deletions
@@ -177,13 +177,13 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con
     // Block when we expect an upgrade to be propagated for topology clusters.
     // NOTE: in case the cluster is performing an upgrade, allow creation of machines for the intermediate step.
     hasSameVersionOfCurrentUpgradeStep := false
-    if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok {
+    if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok && version != "" {
         hasSameVersionOfCurrentUpgradeStep = version == controlPlane.KCP.Spec.Version
     }

     if controlPlane.Cluster.Spec.Topology.IsDefined() && controlPlane.Cluster.Spec.Topology.Version != controlPlane.KCP.Spec.Version && !hasSameVersionOfCurrentUpgradeStep {
         v := controlPlane.Cluster.Spec.Topology.Version
-        if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok {
+        if version, ok := controlPlane.Cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok && version != "" {
             v = version
         }
         log.Info(fmt.Sprintf("Waiting for a version upgrade to %s to be propagated", v))

controlplane/kubeadm/internal/controllers/status.go

Lines changed: 1 addition & 1 deletion
@@ -852,7 +852,7 @@ func getPreflightMessages(cluster *clusterv1.Cluster, preflightChecks internal.P
     additionalMessages := []string{}
     if preflightChecks.TopologyVersionMismatch {
         v := cluster.Spec.Topology.Version
-        if version, ok := cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok {
+        if version, ok := cluster.GetAnnotations()[clusterv1.ClusterTopologyUpgradeStepAnnotation]; ok && version != "" {
             v = version
         }
         additionalMessages = append(additionalMessages, fmt.Sprintf("* waiting for a version upgrade to %s to be propagated", v))

exp/topology/desiredstate/desired_state.go

Lines changed: 8 additions & 3 deletions
@@ -51,7 +51,6 @@
     "sigs.k8s.io/cluster-api/internal/topology/selectors"
     "sigs.k8s.io/cluster-api/internal/webhooks"
     "sigs.k8s.io/cluster-api/util"
-    "sigs.k8s.io/cluster-api/util/annotations"
     "sigs.k8s.io/cluster-api/util/cache"
     "sigs.k8s.io/cluster-api/util/conversion"
 )
@@ -734,15 +733,21 @@ func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, co
     // NOTE, it is required to surface intermediate steps of the upgrade plan to allow creation of machines in KCP/MS.
     // TODO: consider if we want to surface the upgrade plan (or the list of desired versions) in cluster status;
     // TBD if the semantic of the new field can replace this annotation.
+    if cluster.Annotations == nil {
+        cluster.Annotations = map[string]string{}
+    }
     if hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster) {
         // NOTE: to detect if we are at the beginning of an upgrade, we check if the intent to call the AfterClusterUpgrade is already tracked.
         controlPlaneVersion, err := contract.ControlPlane().Version().Get(controlPlane)
         if err != nil {
             return nil, errors.Wrap(err, "error getting control plane version")
         }
-        annotations.AddAnnotations(cluster, map[string]string{clusterv1.ClusterTopologyUpgradeStepAnnotation: *controlPlaneVersion})
+        cluster.Annotations[clusterv1.ClusterTopologyUpgradeStepAnnotation] = *controlPlaneVersion
     } else {
-        delete(cluster.Annotations, clusterv1.ClusterTopologyUpgradeStepAnnotation)
+        // Note: Setting the annotation to "" instead of deleting it because we cannot be sure
+        // that we are able to remove the annotation from the Cluster with SSA if we lost ownership of
+        // the annotation in managedFields e.g. because of: https://github.com/kubernetes/kubernetes/issues/136919.
+        cluster.Annotations[clusterv1.ClusterTopologyUpgradeStepAnnotation] = ""
     }

     return cluster, nil
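Tying the scale.go, status.go, and desired_state.go hunks together: because computeCluster now writes an empty string instead of deleting the annotation, readers of clusterv1.ClusterTopologyUpgradeStepAnnotation must treat "" the same as "annotation not set", which is exactly what the added `&& version != ""` checks do. A minimal illustrative helper follows; it is hypothetical, since the commit deliberately inlines the check at each call site.

// Hypothetical helper, not part of this commit: treat a missing annotation and an empty value
// the same way, mirroring the inlined `ok && version != ""` checks in scale.go and status.go.
package topologyutil // hypothetical package, for illustration only

func upgradeStepVersion(annotations map[string]string, key string) (string, bool) {
    version, ok := annotations[key]
    if !ok || version == "" {
        return "", false
    }
    return version, true
}

// Usage, matching the call sites above:
//   if version, ok := upgradeStepVersion(cluster.GetAnnotations(), clusterv1.ClusterTopologyUpgradeStepAnnotation); ok {
//       v = version
//   }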
