Commit 5e1a7ed

Merge pull request #198 from red-hat-storage/sync_us--main
Syncing latest changes from upstream main for ramen
2 parents: 4fa3709 + 7d7c492

7 files changed: +124 −68 lines

controllers/drcluster_controller.go (+45 −10)

@@ -149,22 +149,17 @@ func (r *DRClusterReconciler) drClusterConfigMapMapFunc(
     return requests
 }
 
-func (r *DRClusterReconciler) drClusterSecretMapFunc(ctx context.Context, secret client.Object) []reconcile.Request {
-    if secret.GetNamespace() != NamespaceName() {
+func (r *DRClusterReconciler) drClusterSecretMapFunc(ctx context.Context, obj client.Object) []reconcile.Request {
+    if obj.GetNamespace() != NamespaceName() {
         return []reconcile.Request{}
     }
 
-    drcusters := &ramen.DRClusterList{}
-    if err := r.Client.List(context.TODO(), drcusters); err != nil {
+    secret, ok := obj.(*corev1.Secret)
+    if !ok {
         return []reconcile.Request{}
     }
 
-    requests := make([]reconcile.Request, len(drcusters.Items))
-    for i := range drcusters.Items {
-        requests[i].Name = drcusters.Items[i].GetName()
-    }
-
-    return requests
+    return filterDRClusterSecret(ctx, r.Client, secret)
 }
 
 // drpcPred watches for updates to the DRPC resource and checks if it requires an appropriate DRCluster reconcile

@@ -277,6 +272,46 @@ func filterDRClusterMCV(mcv *viewv1beta1.ManagedClusterView) []ctrl.Request {
     }
 }
 
+func filterDRClusterSecret(ctx context.Context, reader client.Reader, secret *corev1.Secret) []ctrl.Request {
+    log := ctrl.Log.WithName("filterDRClusterSecret").WithName("Secret")
+
+    drclusters := &ramen.DRClusterList{}
+    if err := reader.List(ctx, drclusters); err != nil {
+        log.Error(err, "Failed to list DRClusters")
+
+        return []reconcile.Request{}
+    }
+
+    requests := []reconcile.Request{}
+
+    for i := range drclusters.Items {
+        drcluster := &drclusters.Items[i]
+
+        s3ProfileName := drcluster.Spec.S3ProfileName
+
+        if s3ProfileName == NoS3StoreAvailable {
+            continue
+        }
+
+        s3StoreProfile, err := GetRamenConfigS3StoreProfile(context.TODO(), reader, s3ProfileName)
+        if err != nil {
+            log.Info("Failed to filter secret", "secret", secret.GetName(), "drcluster", drcluster.Name, "reason", err.Error())
+
+            continue
+        }
+
+        if secret.GetName() == s3StoreProfile.S3SecretRef.Name {
+            requests = append(requests,
+                reconcile.Request{
+                    NamespacedName: types.NamespacedName{Name: drcluster.GetName()},
+                },
+            )
+        }
+    }
+
+    return requests
+}
+
 //nolint:lll
 // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusters,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusters/status,verbs=get;update;patch
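For context, a mapping function like drClusterSecretMapFunc only takes effect once it is registered as a watch on Secret objects, so that Secret events are translated into DRCluster reconcile requests. The snippet below is a minimal sketch of that wiring, assuming a controller-runtime v0.15+ builder and the usual ramen API import path; the actual SetupWithManager in ramen is not part of this commit and may differ.

package controllers

import (
    corev1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/handler"

    ramen "github.com/ramendr/ramen/api/v1alpha1"
)

// Sketch only: translate Secret events into DRCluster reconcile requests via
// the mapping function changed above. The import paths and builder calls are
// assumptions based on a typical controller-runtime v0.15+ setup, not taken
// from this diff.
func (r *DRClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&ramen.DRCluster{}).
        Watches(&corev1.Secret{},
            handler.EnqueueRequestsFromMapFunc(r.drClusterSecretMapFunc)).
        Complete(r)
}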

controllers/drplacementcontrol_controller.go (+7 −3)

@@ -1666,6 +1666,7 @@ func (r *DRPlacementControlReconciler) getStatusCheckDelay(
     return time.Until(beforeProcessing.Add(StatusCheckDelay))
 }
 
+//nolint:cyclop
 func (r *DRPlacementControlReconciler) updateDRPCStatus(
     ctx context.Context, drpc *rmn.DRPlacementControl, userPlacement client.Object, log logr.Logger,
 ) error {

@@ -1681,9 +1682,12 @@ func (r *DRPlacementControlReconciler) updateDRPCStatus(
         r.updateResourceCondition(drpc, clusterDecision.ClusterName, vrgNamespace, log)
     }
 
-    if err := r.setDRPCMetrics(ctx, drpc, log); err != nil {
-        // log the error but do not return the error
-        log.Info("failed to set drpc metrics", "errMSg", err)
+    // do not set metrics if DRPC is being deleted
+    if !isBeingDeleted(drpc, userPlacement) {
+        if err := r.setDRPCMetrics(ctx, drpc, log); err != nil {
+            // log the error but do not return the error
+            log.Info("failed to set drpc metrics", "errMSg", err)
+        }
     }
 
     for i, condition := range drpc.Status.Conditions {
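The new guard relies on an isBeingDeleted helper that is not shown in this diff. Purely as an illustration, a plausible shape for it is a check of the deletion timestamps on both the DRPC and the user placement object, reusing the rmn and client aliases already imported by this controller:

// Hypothetical sketch of the helper referenced above; the actual ramen
// implementation is not included in this commit and may differ.
func isBeingDeleted(drpc *rmn.DRPlacementControl, usrPlacement client.Object) bool {
    // An object is being deleted once its deletion timestamp is set.
    return !drpc.GetDeletionTimestamp().IsZero() ||
        !usrPlacement.GetDeletionTimestamp().IsZero()
}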

controllers/volumereplicationgroup_controller.go (+55 −39)

@@ -578,55 +578,32 @@ func (v *VRGInstance) validateVRGMode() error {
     return nil
 }
 
-func (v *VRGInstance) clusterDataRestore(result *ctrl.Result) error {
-    if v.instance.Spec.PrepareForFinalSync || v.instance.Spec.RunFinalSync {
-        msg := "PV restore skipped, as VRG is orchestrating final sync"
-        setVRGClusterDataReadyCondition(&v.instance.Status.Conditions, v.instance.Generation, msg)
-
-        return nil
-    }
+func (v *VRGInstance) clusterDataRestore(result *ctrl.Result) (int, error) {
+    v.log.Info("Restoring PVs and PVCs")
 
-    clusterDataReady := findCondition(v.instance.Status.Conditions, VRGConditionTypeClusterDataReady)
-    if clusterDataReady != nil {
-        v.log.Info("ClusterDataReady condition",
-            "status", clusterDataReady.Status,
-            "reason", clusterDataReady.Reason,
-            "message", clusterDataReady.Message,
-            "observedGeneration", clusterDataReady.ObservedGeneration,
-            "generation", v.instance.Generation,
-        )
-
-        if clusterDataReady.Status == metav1.ConditionTrue &&
-            clusterDataReady.ObservedGeneration == v.instance.Generation {
-            v.log.Info("VRG's ClusterDataReady condition found. PV restore must have already been applied")
-
-            return nil
-        }
-    } else {
-        v.log.Info("ClusterDataReady condition absent")
-    }
-
-    err := v.restorePVsForVolSync()
+    numRestoredForVS, err := v.restorePVsAndPVCsForVolSync()
     if err != nil {
-        v.log.Info("VolSync PV restore failed")
+        v.log.Info("VolSync PV/PVC restore failed")
 
-        result.Requeue = true
-
-        return fmt.Errorf("failed to restore PVs for VolSync (%w)", err)
+        return numRestoredForVS, fmt.Errorf("failed to restore PV/PVC for VolSync (%w)", err)
     }
 
-    err = v.clusterDataRestoreForVolRep(result)
+    numRestoredForVR, err := v.restorePVsAndPVCsForVolRep(result)
     if err != nil {
-        v.log.Info("VolRep ClusterData restore failed")
+        v.log.Info("VolRep PV/PVC restore failed")
 
-        return fmt.Errorf("failed to restore ClusterData for VolRep (%w)", err)
+        return numRestoredForVS + numRestoredForVR, fmt.Errorf("failed to restore PV/PVC for VolRep (%w)", err)
     }
 
     // Only after both succeed, we mark ClusterDataReady as true
-    msg := "Restored cluster data"
+    msg := "Restored PVs and PVCs"
+    if numRestoredForVS+numRestoredForVR == 0 {
+        msg = "Nothing to restore"
+    }
+
     setVRGClusterDataReadyCondition(&v.instance.Status.Conditions, v.instance.Generation, msg)
 
-    return nil
+    return numRestoredForVS + numRestoredForVR, nil
 }
 
 func (v *VRGInstance) listPVCsByVrgPVCSelector() (*corev1.PersistentVolumeClaimList, error) {

@@ -877,8 +854,18 @@ func (v *VRGInstance) processAsPrimary() ctrl.Result {
         return v.dataError(err, "PVCs deselected unprotect failed", v.result.Requeue)
     }
 
-    if err := v.clusterDataRestore(&v.result); err != nil {
-        return v.clusterDataError(err, "Failed to restore PVs", v.result)
+    if v.shouldRestoreClusterData() {
+        v.result.Requeue = true
+
+        numOfRestoredRes, err := v.clusterDataRestore(&v.result)
+        if err != nil {
+            return v.clusterDataError(err, "Failed to restore PVs/PVCs", v.result)
+        }
+
+        // Save status and requeue if we restored any resources (PV/PVCs). Otherwise, continue
+        if numOfRestoredRes != 0 {
+            return v.updateVRGConditionsAndStatus(v.result)
+        }
     }
 
     v.reconcileAsPrimary()

@@ -895,6 +882,35 @@ func (v *VRGInstance) processAsPrimary() ctrl.Result {
     return v.updateVRGConditionsAndStatus(v.result)
 }
 
+func (v *VRGInstance) shouldRestoreClusterData() bool {
+    if v.instance.Spec.PrepareForFinalSync || v.instance.Spec.RunFinalSync {
+        msg := "PV restore skipped, as VRG is orchestrating final sync"
+        setVRGClusterDataReadyCondition(&v.instance.Status.Conditions, v.instance.Generation, msg)
+
+        return false
+    }
+
+    clusterDataReady := findCondition(v.instance.Status.Conditions, VRGConditionTypeClusterDataReady)
+    if clusterDataReady != nil {
+        v.log.Info("ClusterDataReady condition",
+            "status", clusterDataReady.Status,
+            "reason", clusterDataReady.Reason,
+            "message", clusterDataReady.Message,
+            "observedGeneration", clusterDataReady.ObservedGeneration,
+            "generation", v.instance.Generation,
+        )
+
+        if clusterDataReady.Status == metav1.ConditionTrue &&
+            clusterDataReady.ObservedGeneration == v.instance.Generation {
+            v.log.Info("VRG's ClusterDataReady condition found. PV restore must have already been applied")
+
+            return false
+        }
+    }
+
+    return true
+}
+
 func (v *VRGInstance) reconcileAsPrimary() {
     var finalSyncPrepared struct {
         volSync bool

controllers/vrg_recipe_test.go (+2 −2)

@@ -269,7 +269,7 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() {
         return matchers
     }
     vrgPvcsConsistOfEventually := func(pvcs ...*corev1.PersistentVolumeClaim) {
-        Eventually(vrgPvcsGet).Should(ConsistOf(vrgPvcNamesMatchPvcs(pvcs...)))
+        Eventually(vrgPvcsGet, timeout, interval).Should(ConsistOf(vrgPvcNamesMatchPvcs(pvcs...)))
     }
     vrgPvcSelectorGet := func() (controllers.PvcSelector, error) {
         return controllers.GetPVCSelector(ctx, apiReader, *vrg, *ramenConfig, testLogger)

@@ -538,7 +538,7 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() {
         Expect(pvcSelector.NamespaceNames).To(ConsistOf(vrg.Namespace))
     })
     It("sets DataReady condition's message to something besides a recipe error", func() {
-        Eventually(vrgDataReadyConditionGetAndExpectNonNil).Should(MatchFields(IgnoreExtras, Fields{
+        Eventually(vrgDataReadyConditionGetAndExpectNonNil, timeout, interval).Should(MatchFields(IgnoreExtras, Fields{
             "Message": Not(HavePrefix(recipeErrorMessagePrefix)),
         }))
     })
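These test changes (and the one in vrg_volrep_test.go below) pass explicit timeout and polling-interval arguments to Gomega's Eventually instead of relying on the suite defaults. As a rough illustration, assuming timeout and interval are time.Duration values defined elsewhere in the test suite, the positional form used here is equivalent to Gomega's fluent configuration in recent Gomega versions:

// Positional form, as used in the diff: poll vrgPvcsGet every `interval`
// until the matcher succeeds, failing the assertion after `timeout`.
Eventually(vrgPvcsGet, timeout, interval).Should(ConsistOf(vrgPvcNamesMatchPvcs(pvcs...)))

// Equivalent fluent form.
Eventually(vrgPvcsGet).WithTimeout(timeout).WithPolling(interval).
    Should(ConsistOf(vrgPvcNamesMatchPvcs(pvcs...)))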

controllers/vrg_volrep.go (+10 −9)

@@ -1817,30 +1817,31 @@ func (v *VRGInstance) s3KeyPrefix() string {
     return S3KeyPrefix(v.namespacedName)
 }
 
-func (v *VRGInstance) clusterDataRestoreForVolRep(result *ctrl.Result) error {
+func (v *VRGInstance) restorePVsAndPVCsForVolRep(result *ctrl.Result) (int, error) {
     v.log.Info("Restoring VolRep PVs and PVCs")
 
     if len(v.instance.Spec.S3Profiles) == 0 {
         v.log.Info("No S3 profiles configured")
 
         result.Requeue = true
 
-        return fmt.Errorf("no S3Profiles configured")
+        return 0, fmt.Errorf("no S3Profiles configured")
     }
 
     v.log.Info(fmt.Sprintf("Restoring PVs and PVCs to this managed cluster. ProfileList: %v", v.instance.Spec.S3Profiles))
 
-    if err := v.restorePVsAndPVCsFromS3(result); err != nil {
+    count, err := v.restorePVsAndPVCsFromS3(result)
+    if err != nil {
         errMsg := fmt.Sprintf("failed to restore PVs and PVCs using profile list (%v)", v.instance.Spec.S3Profiles)
         v.log.Info(errMsg)
 
-        return fmt.Errorf("%s: %w", errMsg, err)
+        return 0, fmt.Errorf("%s: %w", errMsg, err)
     }
 
-    return nil
+    return count, nil
 }
 
-func (v *VRGInstance) restorePVsAndPVCsFromS3(result *ctrl.Result) error {
+func (v *VRGInstance) restorePVsAndPVCsFromS3(result *ctrl.Result) (int, error) {
     err := errors.New("s3Profiles empty")
     NoS3 := false
 

@@ -1891,16 +1892,16 @@ func (v *VRGInstance) restorePVsAndPVCsFromS3(result *ctrl.Result) error {
 
         v.log.Info(fmt.Sprintf("Restored %d PVs and %d PVCs using profile %s", pvCount, pvcCount, s3ProfileName))
 
-        return v.kubeObjectsRecover(result, s3StoreProfile, objectStore)
+        return pvCount + pvcCount, v.kubeObjectsRecover(result, s3StoreProfile, objectStore)
     }
 
     if NoS3 {
-        return nil
+        return 0, nil
     }
 
     result.Requeue = true
 
-    return err
+    return 0, err
 }
 
 func (v *VRGInstance) restorePVsFromObjectStore(objectStore ObjectStorer, s3ProfileName string) (int, error) {

controllers/vrg_volrep_test.go (+1 −1)

@@ -335,7 +335,7 @@ var _ = Describe("VolumeReplicationGroupVolRepController", func() {
             Expect(vrg.GetGeneration()).To(Equal(vrgGenerationNext))
 
             return vrg.Status.ObservedGeneration
-        }).Should(Equal(vrgGenerationNext))
+        }, timeout, interval).Should(Equal(vrgGenerationNext))
     })
     It("sets PVC's namespace name in VRG status", func() {
         Expect(vrg.Status.ProtectedPVCs).To(HaveLen(len(t.pvcNames)))

controllers/vrg_volsync.go (+4 −4)

@@ -15,13 +15,13 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func (v *VRGInstance) restorePVsForVolSync() error {
+func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) {
     v.log.Info("VolSync: Restoring VolSync PVs")
 
     if len(v.instance.Spec.VolSync.RDSpec) == 0 {
         v.log.Info("No RDSpec entries. There are no PVCs to restore")
         // No ReplicationDestinations (i.e. no PVCs) to restore
-        return nil
+        return 0, nil
     }
 
     numPVsRestored := 0

@@ -59,12 +59,12 @@ func (v *VRGInstance) restorePVsForVolSync() error {
     }
 
     if numPVsRestored != len(v.instance.Spec.VolSync.RDSpec) {
-        return fmt.Errorf("failed to restore all PVCs using RDSpec (%v)", v.instance.Spec.VolSync.RDSpec)
+        return numPVsRestored, fmt.Errorf("failed to restore all PVCs using RDSpec (%v)", v.instance.Spec.VolSync.RDSpec)
     }
 
     v.log.Info("Success restoring VolSync PVs", "Total", numPVsRestored)
 
-    return nil
+    return numPVsRestored, nil
 }
 
 func (v *VRGInstance) reconcileVolSyncAsPrimary(finalSyncPrepared *bool) (requeue bool) {
