2 changes: 1 addition & 1 deletion hack/build-image/Dockerfile
@@ -34,7 +34,7 @@ RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5

# get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
# is not important)
RUN go install golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
RUN go install golang.org/x/tools/cmd/goimports@v0.33.0

# get protoc compiler and golang plugin
WORKDIR /root
5 changes: 4 additions & 1 deletion test/Makefile
@@ -119,6 +119,8 @@ VELERO_SERVER_DEBUG_MODE ?= false

ITEM_BLOCK_WORKER_COUNT ?= 1

WORKER_OS ?= linux

# Parameters for running migration tests along with all other E2E tests. Both of them should
# be provided, or both left empty to skip the migration tests without affecting the other
# E2E tests.
@@ -221,7 +223,8 @@ run-e2e: ginkgo
--standby-cls-service-account-name=$(STANDBY_CLS_SERVICE_ACCOUNT_NAME) \
--kibishii-directory=$(KIBISHII_DIRECTORY) \
--disable-informer-cache=$(DISABLE_INFORMER_CACHE) \
--image-registry-proxy=$(IMAGE_REGISTRY_PROXY)
--image-registry-proxy=$(IMAGE_REGISTRY_PROXY) \
--worker-os=$(WORKER_OS)

.PHONY: run-perf
run-perf: ginkgo
2 changes: 2 additions & 0 deletions test/e2e/README.md
@@ -79,6 +79,7 @@ These configuration parameters are expected as values to the following command l
1. `--debug-velero-pod-restart`: A switch for debugging velero pod restart.
1. `--fail-fast`: A switch for failing fast when an error is met.
1. `--has-vsphere-plugin`: A switch to indicate whether the Velero vSphere plugin is installed for vSphere environment.
1. `--worker-os`: A switch to indicate whether the workload should run on a Windows or Linux worker OS.

These configurations or parameters are used to generate install options for Velero for each test suite.

@@ -131,6 +132,7 @@ Below is a mapping between `make` variables to E2E configuration flags.
1. `DEBUG_VELERO_POD_RESTART`: `-debug-velero-pod-restart`. Optional.
1. `FAIL_FAST`: `--fail-fast`. Optional.
1. `HAS_VSPHERE_PLUGIN`: `--has-vsphere-plugin`. Optional.
1. `WORKER_OS`: `--worker-os`. Optional.



22 changes: 20 additions & 2 deletions test/e2e/backup/backup.go
@@ -140,7 +140,15 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
veleroCfg.ProvideSnapshotsVolumeParam = provideSnapshotVolumesParmInBackup

// Set DefaultVolumesToFsBackup to false since DefaultVolumesToFsBackup was set to true during installation
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, "", kibishiiNamespace, useVolumeSnapshots, false)).To(Succeed(),
Expect(RunKibishiiTests(
veleroCfg,
backupName,
restoreName,
"",
kibishiiNamespace,
useVolumeSnapshots,
false,
)).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace")
})

@@ -212,7 +220,17 @@ func BackupRestoreTest(backupRestoreTestConfig BackupRestoreTestConfig) {
}
veleroCfg.ProvideSnapshotsVolumeParam = !provideSnapshotVolumesParmInBackup
workloadNS := kibishiiNamespace + bsl
Expect(RunKibishiiTests(veleroCfg, backupName, restoreName, bsl, workloadNS, useVolumeSnapshots, !useVolumeSnapshots)).To(Succeed(),
Expect(
RunKibishiiTests(
veleroCfg,
backupName,
restoreName,
bsl,
workloadNS,
useVolumeSnapshots,
!useVolumeSnapshots,
),
).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace using BSL %s", bsl)
}
})
1 change: 1 addition & 0 deletions test/e2e/backups/deletion.go
@@ -125,6 +125,7 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
kibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
); err != nil {
return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", ns)
}
1 change: 1 addition & 0 deletions test/e2e/backups/ttl.go
@@ -110,6 +110,7 @@ func TTLTest() {
veleroCfg.KibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
)).To(Succeed())
})

12 changes: 10 additions & 2 deletions test/e2e/basic/backup-volume-info/base.go
@@ -138,8 +138,16 @@ func (v *BackupVolumeInfo) CreateResources() error {
// Hitting issue https://github.com/vmware-tanzu/velero/issues/7388
// So populate data only to some of pods, leave other pods empty to verify empty PV datamover
if i%2 == 0 {
Expect(CreateFileToPod(v.Ctx, createNSName, pod.Name, DefaultContainerName, vols[i].Name,
fmt.Sprintf("file-%s", pod.Name), CreateFileContent(createNSName, pod.Name, vols[i].Name))).To(Succeed())
Expect(CreateFileToPod(
v.Ctx,
createNSName,
pod.Name,
DefaultContainerName,
vols[i].Name,
fmt.Sprintf("file-%s", pod.Name),
CreateFileContent(createNSName, pod.Name, vols[i].Name),
WorkerOSLinux,
)).To(Succeed())
}
}
}
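The call above passes `WorkerOSLinux` as the new final argument of `CreateFileToPod`. As a rough sketch (an assumption — the PR does not show `test/util/common`), the worker-OS constants consumed by these tests could be defined along these lines:

```go
// Hypothetical sketch of the worker-OS constants referenced by the e2e tests
// (e.g. WorkerOSLinux above and common.WorkerOSWindows in migration.go); the
// actual definitions in test/util/common are not shown in this PR and may differ.
package common

const (
	WorkerOSLinux   = "linux"
	WorkerOSWindows = "windows"
)
```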
11 changes: 9 additions & 2 deletions test/e2e/basic/namespace-mapping.go
@@ -101,6 +101,7 @@ func (n *NamespaceMapping) CreateResources() error {
n.VeleroCfg.KibishiiDirectory,
n.kibishiiData,
n.VeleroCfg.ImageRegistryProxy,
n.VeleroCfg.WorkerOS,
)).To(Succeed())
})
}
@@ -111,8 +112,14 @@ func (n *NamespaceMapping) Verify() error {
for index, ns := range n.MappedNamespaceList {
n.kibishiiData.Levels = len(*n.NSIncluded) + index
By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
Expect(KibishiiVerifyAfterRestore(n.Client, ns,
n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
Expect(KibishiiVerifyAfterRestore(
n.Client,
ns,
n.Ctx,
n.kibishiiData,
"",
n.VeleroCfg.WorkerOS,
)).To(Succeed(), "Fail to verify workload after restore")
})
}
for _, ns := range *n.NSIncluded {
4 changes: 2 additions & 2 deletions test/e2e/basic/resources-check/namespaces.go
@@ -31,7 +31,7 @@ import (

type MultiNSBackup struct {
TestCase
IsScalTest bool
IsScaleTest bool
NSExcluded *[]string
TimeoutDuration time.Duration
}
@@ -43,7 +43,7 @@ func (m *MultiNSBackup) Init() error {
m.RestoreName = "restore-" + m.CaseBaseName
m.NSExcluded = &[]string{}

if m.IsScalTest {
if m.IsScaleTest {
m.NamespacesTotal = 2500
m.TimeoutDuration = time.Hour * 2
m.TestMsg = &TestMSG{
2 changes: 1 addition & 1 deletion test/e2e/basic/resources-check/resources_check.go
@@ -39,7 +39,7 @@ func GetResourcesCheckTestCases() []VeleroBackupRestoreTest {
func GetResourcesCheckTestCases() []VeleroBackupRestoreTest {
return []VeleroBackupRestoreTest{
&NSAnnotationCase{},
&MultiNSBackup{IsScalTest: false},
&MultiNSBackup{IsScaleTest: false},
&RBACCase{},
}
}
1 change: 1 addition & 0 deletions test/e2e/bsl-mgmt/deletion.go
@@ -162,6 +162,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
veleroCfg.KibishiiDirectory,
DefaultKibishiiData,
veleroCfg.ImageRegistryProxy,
veleroCfg.WorkerOS,
)).To(Succeed())
})

10 changes: 8 additions & 2 deletions test/e2e/e2e_suite_test.go
@@ -356,6 +356,12 @@ func init() {
"",
"The image registry proxy, e.g. when the DockerHub access limitation is reached, can use available proxy to replace. Default is nil.",
)
flag.StringVar(
&test.VeleroCfg.WorkerOS,
"worker-os",
"linux",
"test k8s worker node OS version, should be either linux or windows.",
)
}
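A standalone illustration (not part of the suite) of the new flag's default behavior — when `--worker-os` is not supplied, the configuration falls back to `linux`:

```go
package main

import (
	"flag"
	"fmt"
)

// Minimal sketch mirroring the flag registration above: parse --worker-os and
// fall back to "linux" when the flag is omitted.
func main() {
	var workerOS string
	flag.StringVar(&workerOS, "worker-os", "linux", "should be either linux or windows")
	flag.Parse()
	fmt.Println("scheduling test workloads for worker OS:", workerOS)
}
```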

// Add label [SkipVanillaZfs]:
@@ -621,12 +627,12 @@ var _ = Describe(

var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptIn"),
Label("PVBackup", "OptIn", "FSB"),
OptInPVBackupTest,
)
var _ = Describe(
"Backup resources should follow the specific order in schedule",
Label("PVBackup", "OptOut"),
Label("PVBackup", "OptOut", "FSB"),
OptOutPVBackupTest,
)

90 changes: 54 additions & 36 deletions test/e2e/migration/migration.go
@@ -23,9 +23,11 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"golang.org/x/mod/semver"

"github.com/vmware-tanzu/velero/test"
framework "github.com/vmware-tanzu/velero/test/e2e/test"
"github.com/vmware-tanzu/velero/test/util/common"
util "github.com/vmware-tanzu/velero/test/util/csi"
k8sutil "github.com/vmware-tanzu/velero/test/util/k8s"
"github.com/vmware-tanzu/velero/test/util/kibishii"
@@ -160,6 +162,10 @@ func (m *migrationE2E) Backup() error {
version, err := veleroutil.GetVeleroVersion(m.Ctx, OriginVeleroCfg.VeleroCLI, true)
Expect(err).To(Succeed(), "Fail to get Velero version")
OriginVeleroCfg.VeleroVersion = version
if OriginVeleroCfg.WorkerOS == common.WorkerOSWindows &&
(version != "main" && semver.Compare(version, "v1.16") < 0) {
Skip(fmt.Sprintf("Velero CLI version %s doesn't support Windows migration test.", version))
}

if OriginVeleroCfg.SnapshotMoveData {
OriginVeleroCfg.UseNodeAgent = true
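The version gate above relies on `golang.org/x/mod/semver` ordering. A standalone sketch (not part of the PR) of how it evaluates for a few representative CLI versions:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// Prints whether the Windows migration test would be skipped for a given
// Velero CLI version, mirroring the gate in migrationE2E.Backup: anything
// below v1.16 (other than "main") is skipped.
func main() {
	for _, v := range []string{"main", "v1.15.2", "v1.16.0", "v1.17.1"} {
		skip := v != "main" && semver.Compare(v, "v1.16") < 0
		fmt.Printf("version=%-8s skipWindowsMigration=%v\n", v, skip)
	}
}
```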
@@ -197,6 +203,7 @@
OriginVeleroCfg.KibishiiDirectory,
&m.kibishiiData,
OriginVeleroCfg.ImageRegistryProxy,
OriginVeleroCfg.WorkerOS,
)).To(Succeed())
})

@@ -401,6 +408,7 @@ m.Ctx,
m.Ctx,
&m.kibishiiData,
"",
m.VeleroCfg.WorkerOS,
)).To(Succeed(), "Fail to verify workload after restore")
})

@@ -413,56 +421,66 @@ func (m *migrationE2E) Clean() error {
})

By("Clean resource on standby cluster.", func() {
defer func() {
By("Switch to default KubeConfig context", func() {
k8sutil.KubectlConfigUseContext(
m.Ctx,
m.VeleroCfg.DefaultClusterContext,
)
})
}()

Expect(k8sutil.KubectlConfigUseContext(
m.Ctx, m.VeleroCfg.StandbyClusterContext)).To(Succeed())
m.VeleroCfg.ClientToInstallVelero = m.VeleroCfg.StandbyClient
m.VeleroCfg.ClusterToInstallVelero = m.VeleroCfg.StandbyClusterName

By("Delete StorageClasses created by E2E")
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
),
).To(Succeed())
Expect(
k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
),
).To(Succeed())
if err := k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName,
); err != nil {
fmt.Println("Fail to delete StorageClass1: ", err)
return
}

if err := k8sutil.DeleteStorageClass(
m.Ctx,
*m.VeleroCfg.ClientToInstallVelero,
test.StorageClassName2,
); err != nil {
fmt.Println("Fail to delete StorageClass2: ", err)
return
}

if strings.EqualFold(m.VeleroCfg.Features, test.FeatureCSI) &&
m.VeleroCfg.UseVolumeSnapshots {
By("Delete VolumeSnapshotClass created by E2E")
Expect(
k8sutil.KubectlDeleteByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
),
).To(Succeed())
if err := k8sutil.KubectlDeleteByFile(
m.Ctx,
fmt.Sprintf("../testdata/volume-snapshot-class/%s.yaml",
m.VeleroCfg.StandbyClusterCloudProvider),
); err != nil {
fmt.Println("Fail to delete VolumeSnapshotClass: ", err)
return
}
}

Expect(veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg)).To(Succeed())

Expect(
k8sutil.DeleteNamespace(
m.Ctx,
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
true,
),
).To(Succeed())
})
if err := veleroutil.VeleroUninstall(m.Ctx, m.VeleroCfg); err != nil {
fmt.Println("Fail to uninstall Velero: ", err)
return
}

By("Switch to default KubeConfig context", func() {
Expect(k8sutil.KubectlConfigUseContext(
if err := k8sutil.DeleteNamespace(
m.Ctx,
m.VeleroCfg.DefaultClusterContext,
)).To(Succeed())
*m.VeleroCfg.StandbyClient,
m.CaseBaseName,
true,
); err != nil {
fmt.Println("Fail to delete the workload namespace: ", err)
return
}
})

return nil