/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
	admissionapi "k8s.io/pod-security-admission/api"
)

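// This suite covers unregistering (exporting) detached CNS volumes on
// supervisor and guest clusters via the CnsUnregisterVolume API.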
var _ = ginkgo.Describe("[csi-guest] [csi-supervisor] CNS Unregister Volume", func() {
	f := framework.NewDefaultFramework("cns-unregister-volume")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
	const defaultVolumeOpsScale = 30
	const defaultVolumeOpsScaleWCP = 29
	var (
		client            clientset.Interface
		c                 clientset.Interface
		fullSyncWaitTime  int
		namespace         string
		scParameters      map[string]string
		storagePolicyName string
		volumeOpsScale    int
		isServiceStopped  bool
		serviceName       string
		csiReplicaCount   int32
		deployment        *appsv1.Deployment
	)

	ginkgo.BeforeEach(func() {
		bootstrap()
		client = f.ClientSet
		namespace = getNamespaceToRunTests(f)
		scParameters = make(map[string]string)
		isServiceStopped = false
		storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")

		if len(nodeList.Items) == 0 {
			framework.Failf("Unable to find ready and schedulable Node")
		}

		if guestCluster {
			svcClient, svNamespace := getSvcClientAndNamespace()
			setResourceQuota(svcClient, svNamespace, rqLimit)
		}

		if os.Getenv(envVolumeOperationsScale) != "" {
			volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		} else {
			if vanillaCluster {
				volumeOpsScale = defaultVolumeOpsScale
			} else {
				volumeOpsScale = defaultVolumeOpsScaleWCP
			}
		}
		framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)

		if os.Getenv(envFullSyncWaitTime) != "" {
			fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			// The full sync interval is at least 1 minute, so the configured wait
			// time must be at least 120 seconds and no more than the default.
			if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime {
				framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime)
			}
		} else {
			fullSyncWaitTime = defaultFullSyncWaitTime
		}

		// Get the CSI controller's replica count from the setup.
		controllerClusterConfig := os.Getenv(contollerClusterKubeConfig)
		c = client
		if controllerClusterConfig != "" {
			framework.Logf("Creating client for remote kubeconfig")
			remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			c = remoteC
		}
		deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx,
			vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		csiReplicaCount = *deployment.Spec.Replicas
	})

	ginkgo.AfterEach(func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if isServiceStopped {
			if serviceName == "CSI" {
				framework.Logf("Starting CSI driver")
				ignoreLabels := make(map[string]string)
				err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix,
					csiSystemNamespace)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				// Wait for the CSI pods to be up and running.
				csiPods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(len(csiPods)), 0,
					pollTimeout)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			} else if serviceName == hostdServiceName {
				framework.Logf("Starting the hostd service on all hosts")
				hostIPs := getAllHostsIP(ctx, true)
				for _, hostIP := range hostIPs {
					startHostDOnHost(ctx, hostIP)
				}
			} else {
				vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
				ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
				err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}

		ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec))
		updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec)

		if supervisorCluster {
			deleteResourceQuota(client, namespace)
			dumpSvcNsEventsOnTestFailure(client, namespace)
		}
		if guestCluster {
			svcClient, svNamespace := getSvcClientAndNamespace()
			setResourceQuota(svcClient, svNamespace, defaultrqLimit)
			dumpSvcNsEventsOnTestFailure(svcClient, svNamespace)
		}
	})

	ginkgo.It("export detached volume", func() {
		serviceName = vsanhealthServiceName
		exportDetachedVolume(namespace, client, storagePolicyName, scParameters,
			volumeOpsScale, true)
	})
})

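// exportDetachedVolume provisions volumeOpsScale PVCs, waits for them to be
// bound, creates a CnsUnregisterVolume CR for each detached volume, verifies
// the PVs and CNS volumes are gone, and finally deletes the backing FCDs.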
func exportDetachedVolume(namespace string, client clientset.Interface,
	storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var storageclass *storagev1.StorageClass
	var persistentvolumes []*v1.PersistentVolume
	var pvclaims []*v1.PersistentVolumeClaim
	var err error
	pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)

	// Get a config to talk to the apiserver.
	restConfig := getRestConfigClient()

	framework.Logf("storagePolicyName %v", storagePolicyName)
	framework.Logf("extendVolume %v", extendVolume)

	if supervisorCluster {
		ginkgo.By("CNS_TEST: Running for WCP setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy)
		scParameters[scParamStoragePolicyID] = profileID
		// Create a resource quota for the storage policy.
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	} else {
		ginkgo.By("CNS_TEST: Running for GC setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		scParameters[svStorageClassName] = thickProvPolicy
		scParameters[scParamFsType] = ext4FSType
		storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		allowExpansion := true
		storageclass.AllowVolumeExpansion = &allowExpansion
		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	ginkgo.By("Creating PVCs using the Storage Class")
	framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)
	for i := 0; i < volumeOpsScale; i++ {
		framework.Logf("Creating pvc%v", i)
		pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace,
			getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, ""))
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}

	ginkgo.By("Waiting for all claims to be in bound state")
	persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims,
		2*framework.ClaimProvisionTimeout)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// TODO: Add logic to verify that no orphan volumes are left behind.
	defer func() {
		for _, claim := range pvclaims {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		ginkgo.By("Verify PVs, volumes are deleted from CNS")
		for _, pv := range persistentvolumes {
			err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
				framework.PodDeleteTimeout)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			volumeID := pv.Spec.CSI.VolumeHandle
			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
			gomega.Expect(err).NotTo(gomega.HaveOccurred(),
				fmt.Sprintf("Volume: %s should not be present in the "+
					"CNS after it is deleted from kubernetes", volumeID))
		}
	}()
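	// Create a CnsUnregisterVolume CR for each detached volume and wait for the
	// CR to be created before moving on to the next volume.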
	for _, pv := range persistentvolumes {
		volumeID := pv.Spec.CSI.VolumeHandle
		// Brief pause before issuing the unregister request.
		time.Sleep(30 * time.Second)

		ginkgo.By("Create CNS unregister volume for the above created FCD " + pv.Spec.CSI.VolumeHandle)
		cnsUnRegisterVolume := getCNSUnRegisterVolumeSpec(ctx, namespace, volumeID)
		err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetCreated(ctx,
			restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
		cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
		framework.Logf("CNS unregister volume name : %s", cnsUnRegisterVolumeName)
	}

	ginkgo.By("Verify PVs, volumes are deleted from CNS")
	for _, pv := range persistentvolumes {
		err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
			framework.PodDeleteTimeout)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		volumeID := pv.Spec.CSI.VolumeHandle
		err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
		gomega.Expect(err).NotTo(gomega.HaveOccurred(),
			fmt.Sprintf("Volume: %s should not be present in the "+
				"CNS after it is deleted from kubernetes", volumeID))
	}

	defaultDatastore = getDefaultDatastore(ctx)
	ginkgo.By(fmt.Sprintf("defaultDatastore %v", defaultDatastore))

	for _, pv1 := range persistentvolumes {
		ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle))
		err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(),
			[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}