Skip to content

Commit af616d2

Browse files
committed
Remove Restic code path from PodVolumeRestore.
Signed-off-by: Xun Jiang <xun.jiang@broadcom.com>
1 parent fc6361b commit af616d2

9 files changed

Lines changed: 63 additions & 524 deletions

File tree

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Remove Restic code path from PodVolumeRestore.

cmd/velero-restore-helper/velero-restore-helper.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ func main() {
3535
for {
3636
<-ticker.C
3737
if done() {
38-
fmt.Println("All restic restores are done")
38+
fmt.Println("All FS restores are done")
3939
err := removeFolder()
4040
if err != nil {
4141
fmt.Println(err)

internal/resourcepolicies/resource_policies.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ const (
3838
ConfigmapRefType string = "configmap"
3939
// skip action implies the volume would be skipped from the backup operation
4040
Skip VolumeActionType = "skip"
41-
// fs-backup action implies that the volume would be backed up via file system copy method using the uploader(kopia/restic) configured by the user
41+
// fs-backup action implies that the volume would be backed up via file system copy method using the uploader(kopia) configured by the user
4242
FSBackup VolumeActionType = "fs-backup"
4343
// snapshot action can have 3 different meaning based on velero configuration and backup spec - cloud provider based snapshots, local csi snapshots and datamover snapshots
4444
Snapshot VolumeActionType = "snapshot"

pkg/cmd/cli/nodeagent/server.go

Lines changed: 0 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ import (
3737
"k8s.io/apimachinery/pkg/fields"
3838
"k8s.io/apimachinery/pkg/labels"
3939
"k8s.io/apimachinery/pkg/runtime"
40-
"k8s.io/apimachinery/pkg/types"
4140
"k8s.io/apimachinery/pkg/util/sets"
4241
"k8s.io/client-go/kubernetes"
4342
cacheutil "k8s.io/client-go/tools/cache"
@@ -430,10 +429,6 @@ func (s *nodeAgentServer) run() {
430429
s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller")
431430
}
432431

433-
if err := controller.InitLegacyPodVolumeRestoreReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, s.namespace, s.config.resourceTimeout, s.logger); err != nil {
434-
s.logger.WithError(err).Fatal("Unable to create the legacy pod volume restore controller")
435-
}
436-
437432
dataUploadReconciler := controller.NewDataUploadReconciler(
438433
s.mgr.GetClient(),
439434
s.mgr,
@@ -509,8 +504,6 @@ func (s *nodeAgentServer) run() {
509504
if err := pvrReconciler.AttemptPVRResume(s.ctx, s.logger.WithField("node", s.nodeName), s.namespace); err != nil {
510505
s.logger.WithError(errors.WithStack(err)).Error("Failed to attempt PVR resume")
511506
}
512-
513-
s.markLegacyPVRsFailed(s.mgr.GetClient())
514507
}()
515508

516509
s.logger.Info("Controllers starting...")
@@ -604,47 +597,6 @@ func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface
604597
return nil
605598
}
606599

607-
func (s *nodeAgentServer) markLegacyPVRsFailed(client ctrlclient.Client) {
608-
pvrs := &velerov1api.PodVolumeRestoreList{}
609-
if err := client.List(s.ctx, pvrs, &ctrlclient.ListOptions{Namespace: s.namespace}); err != nil {
610-
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
611-
return
612-
}
613-
614-
for i, pvr := range pvrs.Items {
615-
if !controller.IsLegacyPVR(&pvr) {
616-
continue
617-
}
618-
619-
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
620-
s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
621-
continue
622-
}
623-
624-
pod := &corev1api.Pod{}
625-
if err := client.Get(s.ctx, types.NamespacedName{
626-
Namespace: pvr.Spec.Pod.Namespace,
627-
Name: pvr.Spec.Pod.Name,
628-
}, pod); err != nil {
629-
s.logger.WithError(errors.WithStack(err)).Errorf("failed to get pod \"%s/%s\" of podvolumerestore %q",
630-
pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name, pvr.GetName())
631-
continue
632-
}
633-
if pod.Spec.NodeName != s.nodeName {
634-
s.logger.Debugf("the node of pod referenced by podvolumerestore %q is %q, not %q, skip", pvr.GetName(), pod.Spec.NodeName, s.nodeName)
635-
continue
636-
}
637-
638-
if err := controller.UpdatePVRStatusToFailed(s.ctx, client, &pvrs.Items[i], errors.New("cannot survive from node-agent restart"),
639-
fmt.Sprintf("get a legacy podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, velerov1api.PodVolumeRestorePhaseFailed),
640-
time.Now(), s.logger); err != nil {
641-
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
642-
continue
643-
}
644-
s.logger.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message)
645-
}
646-
}
647-
648600
var getConfigsFunc = nodeagent.GetConfigs
649601

650602
func (s *nodeAgentServer) getDataPathConfigs() error {

pkg/cmd/server/server.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1164,8 +1164,8 @@ func markPodVolumeRestoresCancel(ctx context.Context, client ctrlclient.Client,
11641164

11651165
for i := range pvrs.Items {
11661166
pvr := pvrs.Items[i]
1167-
if controller.IsLegacyPVR(&pvr) {
1168-
log.WithField("PVR", pvr.GetName()).Warn("Found a legacy PVR during velero server restart, cannot stop it")
1167+
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
1168+
log.WithField("PVR", pvr.Name).Warnf("invalid uploader type %s, skip marking cancel for this PVR", pvr.Spec.UploaderType)
11691169
continue
11701170
}
11711171

pkg/controller/pod_volume_restore_controller.go

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,15 @@ func (r *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
147147
log = log.WithField("restore", fmt.Sprintf("%s/%s", pvr.Namespace, pvr.OwnerReferences[0].Name))
148148
}
149149

150+
invalidUploaderType := false
151+
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
152+
log.WithField("uploaderType", pvr.Spec.UploaderType).Info("Skip PVR with invalid uploader type")
153+
invalidUploaderType = true
154+
}
155+
if invalidUploaderType {
156+
return ctrl.Result{}, nil
157+
}
158+
150159
// Logic for clear resources when pvr been deleted
151160
if !isPVRInFinalState(pvr) {
152161
if !controllerutil.ContainsFinalizer(pvr, PodVolumeFinalizer) {
@@ -603,7 +612,7 @@ func (r *PodVolumeRestoreReconciler) closeDataPath(ctx context.Context, pvrName
603612
func (r *PodVolumeRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error {
604613
gp := kube.NewGenericEventPredicate(func(object client.Object) bool {
605614
pvr := object.(*velerov1api.PodVolumeRestore)
606-
if IsLegacyPVR(pvr) {
615+
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
607616
return false
608617
}
609618

@@ -628,7 +637,8 @@ func (r *PodVolumeRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error {
628637

629638
pred := kube.NewAllEventPredicate(func(obj client.Object) bool {
630639
pvr := obj.(*velerov1api.PodVolumeRestore)
631-
return !IsLegacyPVR(pvr)
640+
_, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType)
641+
return err == nil
632642
})
633643

634644
return ctrl.NewControllerManagedBy(mgr).
@@ -678,7 +688,7 @@ func (r *PodVolumeRestoreReconciler) findPVRForTargetPod(ctx context.Context, po
678688

679689
requests := []reconcile.Request{}
680690
for _, item := range list.Items {
681-
if IsLegacyPVR(&item) {
691+
if _, err := uploader.ValidateUploaderType(item.Spec.UploaderType); err != nil {
682692
continue
683693
}
684694

@@ -708,6 +718,11 @@ func (r *PodVolumeRestoreReconciler) findPVRForRestorePod(ctx context.Context, p
708718
"PVR": pvr.Name,
709719
})
710720

721+
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
722+
log.WithField("uploaderType", pvr.Spec.UploaderType).Debug("skip PVR with invalid uploader type")
723+
return []reconcile.Request{}
724+
}
725+
711726
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseAccepted {
712727
return []reconcile.Request{}
713728
}
@@ -1029,7 +1044,7 @@ func (r *PodVolumeRestoreReconciler) AttemptPVRResume(ctx context.Context, logge
10291044

10301045
for i := range pvrs.Items {
10311046
pvr := &pvrs.Items[i]
1032-
if IsLegacyPVR(pvr) {
1047+
if _, err := uploader.ValidateUploaderType(pvr.Spec.UploaderType); err != nil {
10331048
continue
10341049
}
10351050

0 commit comments

Comments (0)