Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions build.env
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,13 @@ CSI_IMAGE_VERSION=canary
# cephcsi upgrade version
CSI_UPGRADE_VERSION=v3.13.1

# ceph-csi-operator version used for e2e test
# TODO: use release tag which includes the bug fixes
# - https://github.com/ceph/ceph-csi-operator/pull/206
# - https://github.com/ceph/ceph-csi-operator/pull/207
# Until then, use the latest tag instead of the release tag.
CEPH_CSI_OPERATOR_VERSION=latest

# Ceph version to use
BASE_IMAGE=quay.io/ceph/ceph:v19
CEPH_VERSION=squid
Expand Down
96 changes: 58 additions & 38 deletions e2e/cephfs.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,19 @@ var (
subvolumegroup = "e2e"
fileSystemName = "myfs"
fileSystemPoolName = "myfs-replicated"

helmCephFSPodsLabel = "ceph-csi-cephfs"

operatorCephFSDeploymentName = "cephfs.csi.ceph.com-ctrlplugin"
operatorCephFSDaemonsetName = "cephfs.csi.ceph.com-nodeplugin"

cephFSDeployment CephFSDeploymentMethod
)

// CephFSDeployment implements the CephFSDeploymentMethod interface for the
// default (non-operator) CephFS driver deployment used by the e2e tests.
// It embeds DriverInfo to reuse the generic accessor/helper methods
// (deployment name, daemonset name, pod selector, etc.).
type CephFSDeployment struct {
	DriverInfo
}

func deployCephfsPlugin() {
// delete objects deployed by rook

Expand Down Expand Up @@ -165,6 +176,18 @@ func validateSubvolumePath(f *framework.Framework, pvcName, pvcNamespace, fileSy
return nil
}

// NewCephFSDeployment returns a CephFSDeploymentMethod backed by the default
// (non-operator) CephFS driver objects. The returned value carries the
// client set plus the deployment, daemonset, helm pod label, and driver
// container names used throughout the CephFS e2e suite.
func NewCephFSDeployment(c clientset.Interface) CephFSDeploymentMethod {
	info := DriverInfo{
		clientSet:        c,
		deploymentName:   cephFSDeploymentName,
		daemonsetName:    cephFSDeamonSetName,
		helmPodLabelName: helmCephFSPodsLabel,
		driverContainers: []string{cephFSContainerName},
	}

	return &CephFSDeployment{DriverInfo: info}
}

var _ = Describe(cephfsType, func() {
f := framework.NewDefaultFramework(cephfsType)
f.NamespacePodSecurityEnforceLevel = api.LevelPrivileged
Expand All @@ -175,13 +198,20 @@ var _ = Describe(cephfsType, func() {
Skip("Skipping CephFS E2E")
}
c = f.ClientSet
if deployCephFS {
if cephCSINamespace != defaultNs {
err := createNamespace(c, cephCSINamespace)
if err != nil {
framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
}
cephFSDeployment = NewCephFSDeployment(c)
if operatorDeployment {
cephFSDeployment = NewCephFSOperatorDeployment(c)
}

// No need to create the namespace if ceph-csi is deployed via helm or operator.
if cephCSINamespace != defaultNs && !(helmTest || operatorDeployment) {
err := createNamespace(c, cephCSINamespace)
if err != nil {
framework.Failf("failed to create namespace %s: %v", cephCSINamespace, err)
}
}

if deployCephFS {
deployCephfsPlugin()
}
err := createConfigMap(cephFSDirPath, f.ClientSet, f)
Expand All @@ -208,12 +238,9 @@ var _ = Describe(cephfsType, func() {
}
deployVault(f.ClientSet, deployTimeout)

// wait for cluster name update in deployment
containers := []string{cephFSContainerName}
err = waitForContainersArgsUpdate(c, cephCSINamespace, cephFSDeploymentName,
"clustername", defaultClusterName, containers, deployTimeout)
err = cephFSDeployment.setClusterName(defaultClusterName)
if err != nil {
framework.Failf("timeout waiting for deployment update %s/%s: %v", cephCSINamespace, cephFSDeploymentName, err)
framework.Failf("failed to set cluster name: %v", err)
}

err = createSubvolumegroup(f, fileSystemName, subvolumegroup)
Expand All @@ -226,13 +253,14 @@ var _ = Describe(cephfsType, func() {
if !testCephFS || upgradeTesting {
Skip("Skipping CephFS E2E")
}

if CurrentSpecReport().Failed() {
// log pods created by helm chart
logsCSIPods("app=ceph-csi-cephfs", c)
logsCSIPods("app="+helmCephFSPodsLabel, c)
// log provisioner
logsCSIPods("app=csi-cephfsplugin-provisioner", c)
logsCSIPods("app="+cephFSDeployment.getDeploymentName(), c)
// log node plugin
logsCSIPods("app=csi-cephfsplugin", c)
logsCSIPods("app="+cephFSDeployment.getDaemonsetName(), c)

// log all details from the namespace where Ceph-CSI is deployed
e2edebug.DumpAllNamespaceInfo(context.TODO(), c, cephCSINamespace)
Expand Down Expand Up @@ -266,11 +294,12 @@ var _ = Describe(cephfsType, func() {

if deployCephFS {
deleteCephfsPlugin()
if cephCSINamespace != defaultNs {
err = deleteNamespace(c, cephCSINamespace)
if err != nil {
framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
}
}
// No need to delete the namespace if ceph-csi is deployed via helm or operator.
if cephCSINamespace != defaultNs && !(helmTest || operatorDeployment) {
err = deleteNamespace(c, cephCSINamespace)
if err != nil {
framework.Failf("failed to delete namespace %s: %v", cephCSINamespace, err)
}
}
})
Expand Down Expand Up @@ -299,16 +328,16 @@ var _ = Describe(cephfsType, func() {
}

By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
err := waitForDeploymentComplete(f.ClientSet, cephFSDeployment.getDeploymentName(), cephCSINamespace, deployTimeout)
if err != nil {
framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
framework.Failf("timeout waiting for deployment %s: %v", cephFSDeployment.getDeploymentName(), err)
}
})

By("checking nodeplugin daemonset pods are running", func() {
err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err := waitForDaemonSets(cephFSDeployment.getDaemonsetName(), cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeployment.getDaemonsetName(), err)
}
})

Expand Down Expand Up @@ -338,7 +367,7 @@ var _ = Describe(cephfsType, func() {
}

err = verifySeLinuxMountOption(f, pvcPath, appPath,
cephFSDeamonSetName, cephFSContainerName, cephCSINamespace)
cephFSDeployment.getDaemonsetName(), cephFSContainerName, cephCSINamespace)
if err != nil {
framework.Failf("failed to verify mount options: %v", err)
}
Expand Down Expand Up @@ -764,7 +793,7 @@ var _ = Describe(cephfsType, func() {
}
}
// Kill ceph-fuse in cephfs-csi node plugin Pods.
nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
nodePluginSelector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeployment.getDaemonsetName())
if err != nil {
framework.Failf("failed to get node plugin DaemonSet label selector: %v", err)
}
Expand Down Expand Up @@ -2498,20 +2527,11 @@ var _ = Describe(cephfsType, func() {
framework.Failf("failed to create configmap: %v", err)
}

// delete csi pods
err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
cephCSINamespace, false)
if err != nil {
framework.Failf("failed to delete pods with labels: %v", err)
}
// wait for csi pods to come up
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
framework.Failf("timeout waiting for daemonset pods: %v", err)
}
err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
// restart csi pods for the configmap to take effect.
err = recreateCSIPods(f, cephFSDeployment.getPodSelector(),
cephFSDeployment.getDaemonsetName(), cephFSDeployment.getDeploymentName())
if err != nil {
framework.Failf("timeout waiting for deployment pods: %v", err)
framework.Failf("failed to recreate cephfs csi pods: %v", err)
}
}

Expand Down
4 changes: 2 additions & 2 deletions e2e/cephfs_helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
stdErr, err := execCommandInDaemonsetPod(
f,
cmd,
cephFSDeamonSetName,
cephFSDeployment.getDaemonsetName(),
pod.Spec.NodeName,
cephFSContainerName,
cephCSINamespace)
Expand Down Expand Up @@ -396,7 +396,7 @@ func validateEncryptedCephfs(f *framework.Framework, pvName, appName string) err
pod.UID,
pvName)

selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName)
selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeployment.getDaemonsetName())
if err != nil {
return fmt.Errorf("failed to get labels: %w", err)
}
Expand Down
5 changes: 5 additions & 0 deletions e2e/e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ func init() {
flag.StringVar(&fileSystemName, "filesystem", "myfs", "CephFS filesystem to use")
flag.StringVar(&clusterID, "clusterid", "", "Ceph cluster ID to use (defaults to `ceph fsid` detection)")
flag.StringVar(&nfsDriverName, "nfs-driver", "nfs.csi.ceph.com", "name of the driver for NFS-volumes")
flag.BoolVar(&operatorDeployment, "operator-deployment", false, "test running on deployment via operator")
setDefaultKubeconfig()

// Register framework flags, then handle flags
Expand Down Expand Up @@ -91,4 +92,8 @@ func handleFlags() {
testNFS = testCephFS
deployNFS = deployCephFS
}

if operatorDeployment {
cephCSINamespace = "ceph-csi-operator-system"
}
}
3 changes: 2 additions & 1 deletion e2e/migration.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,8 @@ func generateClusterIDConfigMapForMigration(f *framework.Framework, c kubernetes
return fmt.Errorf("failed to create configmap: %w", err)
}
// restart csi pods for the configmap to take effect.
err = recreateCSIPods(f, rbdPodLabels, rbdDaemonsetName, rbdDeploymentName)
err = recreateCSIPods(f,
rbdDeployment.getPodSelector(), rbdDeployment.getDaemonsetName(), rbdDeployment.getDeploymentName())
if err != nil {
return fmt.Errorf("failed to recreate rbd csi pods: %w", err)
}
Expand Down
Loading