Skip to content

Commit 9bf7fe7

Browse files
committed
Skip VS and VSC not created by backup.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
1 parent b58dbcb commit 9bf7fe7

8 files changed

Lines changed: 245 additions & 161 deletions

File tree

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Skip VS and VSC not created by backup.

pkg/backup/actions/csi/pvc_action.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,6 @@ func (p *pvcBackupItemAction) Execute(
285285
vs,
286286
p.crClient,
287287
p.log,
288-
true,
289288
backup.Spec.CSISnapshotTimeout.Duration,
290289
)
291290
if err != nil {

pkg/backup/actions/csi/volumesnapshot_action.go

Lines changed: 46 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -96,30 +96,13 @@ func (p *volumeSnapshotBackupItemAction) Execute(
9696
},
9797
}
9898

99-
// determine if we are backing up a VolumeSnapshot that was created by velero while
100-
// performing backup of a CSI backed PVC.
101-
// For VolumeSnapshots that were created during the backup of a CSI backed PVC,
102-
// we will wait for the VolumeSnapshotContents to be available.
103-
// For VolumeSnapshots created outside of velero, we expect the VolumeSnapshotContent
104-
// to be available prior to backing up the VolumeSnapshot. In case of a failure,
105-
// backup should be re-attempted after the CSI driver has reconciled the VolumeSnapshot.
106-
// existence of the velerov1api.BackupNameLabel indicates that the VolumeSnapshot was
107-
// created while backing up a CSI backed PVC.
108-
109-
// We want to await reconciliation of only those VolumeSnapshots created during the
110-
// ongoing backup. For this we will wait only if the backup label exists on the
111-
// VolumeSnapshot object and the backup name is the same as that of the value of the
112-
// backup label.
113-
backupOngoing := vs.Labels[velerov1api.BackupNameLabel] == label.GetValidName(backup.Name)
114-
11599
p.log.Infof("Getting VolumesnapshotContent for Volumesnapshot %s/%s",
116100
vs.Namespace, vs.Name)
117101

118102
vsc, err := csi.WaitUntilVSCHandleIsReady(
119103
vs,
120104
p.crClient,
121105
p.log,
122-
backupOngoing,
123106
backup.Spec.CSISnapshotTimeout.Duration,
124107
)
125108
if err != nil {
@@ -171,42 +154,40 @@ func (p *volumeSnapshotBackupItemAction) Execute(
171154
}
172155
}
173156

174-
if backupOngoing {
175-
p.log.Infof("Patching VolumeSnapshotContent %s with velero BackupNameLabel",
176-
vsc.Name)
177-
// If we created the VolumeSnapshotContent object during this ongoing backup,
178-
// we would have created it with a DeletionPolicy of Retain.
179-
// But, we want to retain these VolumeSnapshotContent ONLY for the lifetime
180-
// of the backup. To that effect, during velero backup
181-
// deletion, we will update the DeletionPolicy of the VolumeSnapshotContent
182-
// and then delete the VolumeSnapshot object which will cascade delete the
183-
// VolumeSnapshotContent and the associated snapshot in the storage
184-
// provider (handled by the CSI driver and the CSI common controller).
185-
// However, in the event that the VolumeSnapshot object is deleted outside
186-
// of the backup deletion process, it is possible that the dynamically created
187-
// VolumeSnapshotContent object will be left as an orphaned and non-discoverable
188-
// resource in the cluster as well as in the storage provider. To avoid piling
189-
// up of such orphaned resources, we will want to discover and delete the
190-
// dynamically created VolumeSnapshotContent. We do that by adding
191-
// the "velero.io/backup-name" label on the VolumeSnapshotContent.
192-
// Further, we want to add this label only on VolumeSnapshotContents that
193-
// were created during an ongoing velero backup.
194-
originVSC := vsc.DeepCopy()
195-
kubeutil.AddLabels(
196-
&vsc.ObjectMeta,
197-
map[string]string{
198-
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
199-
},
200-
)
201-
202-
if vscPatchError := p.crClient.Patch(
203-
context.TODO(),
204-
vsc,
205-
crclient.MergeFrom(originVSC),
206-
); vscPatchError != nil {
207-
p.log.Warnf("Failed to patch VolumeSnapshotContent %s: %v",
208-
vsc.Name, vscPatchError)
209-
}
157+
p.log.Infof("Patching VolumeSnapshotContent %s with velero BackupNameLabel",
158+
vsc.Name)
159+
// If we created the VolumeSnapshotContent object during this ongoing backup,
160+
// we would have created it with a DeletionPolicy of Retain.
161+
// But, we want to retain these VolumeSnapshotContent ONLY for the lifetime
162+
// of the backup. To that effect, during velero backup
163+
// deletion, we will update the DeletionPolicy of the VolumeSnapshotContent
164+
// and then delete the VolumeSnapshot object which will cascade delete the
165+
// VolumeSnapshotContent and the associated snapshot in the storage
166+
// provider (handled by the CSI driver and the CSI common controller).
167+
// However, in the event that the VolumeSnapshot object is deleted outside
168+
// of the backup deletion process, it is possible that the dynamically created
169+
// VolumeSnapshotContent object will be left as an orphaned and non-discoverable
170+
// resource in the cluster as well as in the storage provider. To avoid piling
171+
// up of such orphaned resources, we will want to discover and delete the
172+
// dynamically created VolumeSnapshotContent. We do that by adding
173+
// the "velero.io/backup-name" label on the VolumeSnapshotContent.
174+
// Further, we want to add this label only on VolumeSnapshotContents that
175+
// were created during an ongoing velero backup.
176+
originVSC := vsc.DeepCopy()
177+
kubeutil.AddLabels(
178+
&vsc.ObjectMeta,
179+
map[string]string{
180+
velerov1api.BackupNameLabel: label.GetValidName(backup.Name),
181+
},
182+
)
183+
184+
if vscPatchError := p.crClient.Patch(
185+
context.TODO(),
186+
vsc,
187+
crclient.MergeFrom(originVSC),
188+
); vscPatchError != nil {
189+
p.log.Warnf("Failed to patch VolumeSnapshotContent %s: %v",
190+
vsc.Name, vscPatchError)
210191
}
211192
}
212193

@@ -244,20 +225,18 @@ func (p *volumeSnapshotBackupItemAction) Execute(
244225
var itemToUpdate []velero.ResourceIdentifier
245226

246227
// Only return Async operation for VSC created for this backup.
247-
if backupOngoing {
248-
// The operationID is of the form <namespace>/<volumesnapshot-name>/<started-time>
249-
operationID = vs.Namespace + "/" + vs.Name + "/" + time.Now().Format(time.RFC3339)
250-
itemToUpdate = []velero.ResourceIdentifier{
251-
{
252-
GroupResource: kuberesource.VolumeSnapshots,
253-
Namespace: vs.Namespace,
254-
Name: vs.Name,
255-
},
256-
{
257-
GroupResource: kuberesource.VolumeSnapshotContents,
258-
Name: vsc.Name,
259-
},
260-
}
228+
// The operationID is of the form <namespace>/<volumesnapshot-name>/<started-time>
229+
operationID = vs.Namespace + "/" + vs.Name + "/" + time.Now().Format(time.RFC3339)
230+
itemToUpdate = []velero.ResourceIdentifier{
231+
{
232+
GroupResource: kuberesource.VolumeSnapshots,
233+
Namespace: vs.Namespace,
234+
Name: vs.Name,
235+
},
236+
{
237+
GroupResource: kuberesource.VolumeSnapshotContents,
238+
Name: vsc.Name,
239+
},
261240
}
262241

263242
return &unstructured.Unstructured{Object: vsMap},

pkg/backup/actions/csi/volumesnapshot_action_test.go

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -49,23 +49,6 @@ func TestVSExecute(t *testing.T) {
4949
expectedAdditionalItems []velero.ResourceIdentifier
5050
expectedItemToUpdate []velero.ResourceIdentifier
5151
}{
52-
{
53-
name: "VS not created by backup, has no status. Backup is finalizing",
54-
backup: builder.ForBackup("velero", "backup").
55-
Phase(velerov1api.BackupPhaseFinalizing).Result(),
56-
vs: builder.ForVolumeSnapshot("velero", "vs").
57-
VolumeSnapshotClass("class").Result(),
58-
expectedErr: "",
59-
},
60-
{
61-
name: "VS is not created by the backup, associated VSC not exists",
62-
backup: builder.ForBackup("velero", "backup").
63-
Phase(velerov1api.BackupPhaseInProgress).Result(),
64-
vs: builder.ForVolumeSnapshot("velero", "vs").
65-
VolumeSnapshotClass("class").Status().
66-
BoundVolumeSnapshotContentName("vsc").Result(),
67-
expectedErr: `error getting volume snapshot content from API: volumesnapshotcontents.snapshot.storage.k8s.io "vsc" not found`,
68-
},
6952
{
7053
name: "Normal case",
7154
backup: builder.ForBackup("velero", "backup").

pkg/controller/backup_controller.go

Lines changed: 82 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import (
2121
"context"
2222
"fmt"
2323
"os"
24+
"slices"
2425
"time"
2526

2627
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
@@ -31,6 +32,7 @@ import (
3132
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3233
"k8s.io/apimachinery/pkg/labels"
3334
kerrors "k8s.io/apimachinery/pkg/util/errors"
35+
"k8s.io/apimachinery/pkg/util/sets"
3436
"k8s.io/apimachinery/pkg/util/wait"
3537
"k8s.io/utils/clock"
3638
ctrl "sigs.k8s.io/controller-runtime"
@@ -63,6 +65,20 @@ const (
6365
backupResyncPeriod = time.Minute
6466
)
6567

68+
var nonBackupNamespaceScopedResources = []string{
69+
// CSI VolumeSnapshot and VolumeSnapshotContent are intermediate resources.
70+
// Velero only handles the VS and VSC created during backup,
71+
// not during resource collecting.
72+
"volumesnapshots.snapshot.storage.k8s.io",
73+
}
74+
75+
var nonBackupClusterScopedResources = []string{
76+
// CSI VolumeSnapshot and VolumeSnapshotContent are intermediate resources.
77+
// Velero only handle the VS and VSC created during backup,
78+
// not during resource collecting.
79+
"volumesnapshotcontents.snapshot.storage.k8s.io",
80+
}
81+
6682
type backupReconciler struct {
6783
ctx context.Context
6884
logger logrus.FieldLogger
@@ -481,19 +497,51 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg
481497
request.Status.ValidationErrors = append(request.Status.ValidationErrors, validatedError)
482498
}
483499

484-
// validate the included/excluded resources
485-
for _, err := range collections.ValidateIncludesExcludes(request.Spec.IncludedResources, request.Spec.ExcludedResources) {
486-
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
487-
}
488-
489-
// validate the cluster-scoped included/excluded resources
490-
for _, err := range collections.ValidateScopedIncludesExcludes(request.Spec.IncludedClusterScopedResources, request.Spec.ExcludedClusterScopedResources) {
491-
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid cluster-scoped included/excluded resource lists: %s", err))
492-
}
500+
if collections.UseOldResourceFilters(request.Spec) {
501+
// validate the included/excluded resources
502+
ieErr := collections.ValidateIncludesExcludes(request.Spec.IncludedResources, request.Spec.ExcludedResources)
503+
if len(ieErr) > 0 {
504+
for _, err := range ieErr {
505+
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
506+
}
507+
} else {
508+
request.Spec.IncludedResources, request.Spec.ExcludedResources =
509+
modifyResourceIncludeExclude(
510+
request.Spec.IncludedResources,
511+
request.Spec.ExcludedResources,
512+
append(nonBackupNamespaceScopedResources, nonBackupClusterScopedResources...),
513+
)
514+
}
515+
} else {
516+
// validate the cluster-scoped included/excluded resources
517+
clusterErr := collections.ValidateScopedIncludesExcludes(request.Spec.IncludedClusterScopedResources, request.Spec.ExcludedClusterScopedResources)
518+
if len(clusterErr) > 0 {
519+
for _, err := range clusterErr {
520+
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid cluster-scoped included/excluded resource lists: %s", err))
521+
}
522+
} else {
523+
request.Spec.IncludedClusterScopedResources, request.Spec.ExcludedClusterScopedResources =
524+
modifyResourceIncludeExclude(
525+
request.Spec.IncludedClusterScopedResources,
526+
request.Spec.ExcludedClusterScopedResources,
527+
nonBackupClusterScopedResources,
528+
)
529+
}
493530

494-
// validate the namespace-scoped included/excluded resources
495-
for _, err := range collections.ValidateScopedIncludesExcludes(request.Spec.IncludedNamespaceScopedResources, request.Spec.ExcludedNamespaceScopedResources) {
496-
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid namespace-scoped included/excluded resource lists: %s", err))
531+
// validate the namespace-scoped included/excluded resources
532+
namespaceErr := collections.ValidateScopedIncludesExcludes(request.Spec.IncludedNamespaceScopedResources, request.Spec.ExcludedNamespaceScopedResources)
533+
if len(namespaceErr) > 0 {
534+
for _, err := range namespaceErr {
535+
request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid namespace-scoped included/excluded resource lists: %s", err))
536+
}
537+
} else {
538+
request.Spec.IncludedNamespaceScopedResources, request.Spec.ExcludedNamespaceScopedResources =
539+
modifyResourceIncludeExclude(
540+
request.Spec.IncludedNamespaceScopedResources,
541+
request.Spec.ExcludedNamespaceScopedResources,
542+
nonBackupNamespaceScopedResources,
543+
)
544+
}
497545
}
498546

499547
// validate the included/excluded namespaces
@@ -932,3 +980,25 @@ func oldAndNewFilterParametersUsedTogether(backupSpec velerov1api.BackupSpec) bo
932980

933981
return haveOldResourceFilterParameters && haveNewResourceFilterParameters
934982
}
983+
984+
// modifyResourceIncludeExclude returns copies of the include/exclude resource
// lists in which every entry of addedExclude appears in the exclude list
// exactly once, and no excluded entry remains in the include list.
// The input slices are never mutated, so the caller's backup spec is left
// untouched.
func modifyResourceIncludeExclude(include, exclude, addedExclude []string) (modifiedInclude, modifiedExclude []string) {
	// Append the forced exclusions, skipping any already present.
	modifiedExclude = slices.Clone(exclude)
	for _, ex := range addedExclude {
		if !slices.Contains(modifiedExclude, ex) {
			modifiedExclude = append(modifiedExclude, ex)
		}
	}

	// Remove every excluded resource from the include list.
	// DeleteFunc on a clone avoids two bugs in the naive approach of
	// calling slices.Delete while ranging over the same slice: the range
	// reads the shifted/zeroed tail (skipping the element right after a
	// deletion), and the delete mutates the caller's backing array.
	modifiedInclude = slices.DeleteFunc(slices.Clone(include), func(in string) bool {
		return slices.Contains(modifiedExclude, in)
	})

	return modifiedInclude, modifiedExclude
}

0 commit comments

Comments
 (0)