Commit 1c13c2b

[CNS-UnRegisterVolume-API]: Automation tests for CNS-UnRegisterVolume API Feature

1 parent e03934b commit 1c13c2b
File tree

2 files changed: +363 -0 lines changed
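The commit adds a new e2e spec that exercises the CnsUnregisterVolume API against detached volumes, plus the supporting spec-builder, create, get, and wait helpers in tests/e2e/util.go.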
New file: +290 lines

@@ -0,0 +1,290 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	fnodes "k8s.io/kubernetes/test/e2e/framework/node"
	fpod "k8s.io/kubernetes/test/e2e/framework/pod"
	fpv "k8s.io/kubernetes/test/e2e/framework/pv"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = ginkgo.Describe("[csi-guest] [csi-supervisor] CNS Unregister Volume", func() {
	f := framework.NewDefaultFramework("cns-unregister-volume")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	const defaultVolumeOpsScale = 30
	const defaultVolumeOpsScaleWCP = 29

	var (
		client            clientset.Interface
		c                 clientset.Interface
		fullSyncWaitTime  int
		namespace         string
		scParameters      map[string]string
		storagePolicyName string
		volumeOpsScale    int
		isServiceStopped  bool
		serviceName       string
		csiReplicaCount   int32
		deployment        *appsv1.Deployment
	)

	ginkgo.BeforeEach(func() {
		bootstrap()
		client = f.ClientSet
		namespace = getNamespaceToRunTests(f)
		scParameters = make(map[string]string)
		isServiceStopped = false
		storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
		framework.ExpectNoError(err, "Unable to find ready and schedulable Node")

		if !(len(nodeList.Items) > 0) {
			framework.Failf("Unable to find ready and schedulable Node")
		}

		if guestCluster {
			svcClient, svNamespace := getSvcClientAndNamespace()
			setResourceQuota(svcClient, svNamespace, rqLimit)
		}

		if os.Getenv("VOLUME_OPS_SCALE") != "" {
			volumeOpsScale, err = strconv.Atoi(os.Getenv(envVolumeOperationsScale))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		} else {
			if vanillaCluster {
				volumeOpsScale = defaultVolumeOpsScale
			} else {
				volumeOpsScale = defaultVolumeOpsScaleWCP
			}
		}
		framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)

		if os.Getenv(envFullSyncWaitTime) != "" {
			fullSyncWaitTime, err = strconv.Atoi(os.Getenv(envFullSyncWaitTime))
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			// Full sync interval can be 1 min at minimum, so the full sync
			// wait time has to be more than 120s.
			if fullSyncWaitTime < 120 || fullSyncWaitTime > defaultFullSyncWaitTime {
				framework.Failf("The FullSync Wait time %v is not set correctly", fullSyncWaitTime)
			}
		} else {
			fullSyncWaitTime = defaultFullSyncWaitTime
		}

		// Get the CSI controller's replica count from the setup.
		controllerClusterConfig := os.Getenv(contollerClusterKubeConfig)
		c = client
		if controllerClusterConfig != "" {
			framework.Logf("Creating client for remote kubeconfig")
			remoteC, err := createKubernetesClientFromConfig(controllerClusterConfig)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			c = remoteC
		}
		deployment, err = c.AppsV1().Deployments(csiSystemNamespace).Get(ctx,
			vSphereCSIControllerPodNamePrefix, metav1.GetOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		csiReplicaCount = *deployment.Spec.Replicas
	})

	ginkgo.AfterEach(func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if isServiceStopped {
			if serviceName == "CSI" {
				framework.Logf("Starting CSI driver")
				ignoreLabels := make(map[string]string)
				err := updateDeploymentReplicawithWait(c, csiReplicaCount, vSphereCSIControllerPodNamePrefix,
					csiSystemNamespace)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())

				// Wait for the CSI pods to be up and running.
				list_of_pods, err := fpod.GetPodsInNamespace(ctx, client, csiSystemNamespace, ignoreLabels)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				num_csi_pods := len(list_of_pods)
				err = fpod.WaitForPodsRunningReady(ctx, client, csiSystemNamespace, int32(num_csi_pods), 0,
					pollTimeout)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			} else if serviceName == hostdServiceName {
				framework.Logf("In afterEach function to start the hostd service on all hosts")
				hostIPs := getAllHostsIP(ctx, true)
				for _, hostIP := range hostIPs {
					startHostDOnHost(ctx, hostIP)
				}
			} else {
				vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
				ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
				err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
				err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage)
				gomega.Expect(err).NotTo(gomega.HaveOccurred())
			}
		}

		ginkgo.By(fmt.Sprintf("Resetting provisioner time interval to %s sec", defaultProvisionerTimeInSec))
		updateCSIDeploymentProvisionerTimeout(c, csiSystemNamespace, defaultProvisionerTimeInSec)

		if supervisorCluster {
			deleteResourceQuota(client, namespace)
			dumpSvcNsEventsOnTestFailure(client, namespace)
		}
		if guestCluster {
			svcClient, svNamespace := getSvcClientAndNamespace()
			setResourceQuota(svcClient, svNamespace, defaultrqLimit)
			dumpSvcNsEventsOnTestFailure(svcClient, svNamespace)
		}
	})

	ginkgo.It("export detached volume", func() {
		serviceName = vsanhealthServiceName
		exportDetachedVolume(namespace, client, storagePolicyName, scParameters,
			volumeOpsScale, true)
	})
})

func exportDetachedVolume(namespace string, client clientset.Interface,
	storagePolicyName string, scParameters map[string]string, volumeOpsScale int, extendVolume bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var storageclass *storagev1.StorageClass
	var persistentvolumes []*v1.PersistentVolume
	var pvclaims []*v1.PersistentVolumeClaim
	var err error
	//var fullSyncWaitTime int
	pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)

	// Get a config to talk to the apiserver.
	restConfig := getRestConfigClient()

	framework.Logf("storagePolicyName %v", storagePolicyName)
	framework.Logf("extendVolume %v", extendVolume)

	if supervisorCluster {
		ginkgo.By("CNS_TEST: Running for WCP setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		profileID := e2eVSphere.GetSpbmPolicyID(thickProvPolicy)
		scParameters[scParamStoragePolicyID] = profileID
		// Create resource quota.
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		storageclass, err = createStorageClass(client, scParameters, nil, "", "", true, thickProvPolicy)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	} else {
		ginkgo.By("CNS_TEST: Running for GC setup")
		thickProvPolicy := os.Getenv(envStoragePolicyNameWithThickProvision)
		if thickProvPolicy == "" {
			ginkgo.Skip(envStoragePolicyNameWithThickProvision + " env variable not set")
		}
		createResourceQuota(client, namespace, rqLimit, thickProvPolicy)
		scParameters[svStorageClassName] = thickProvPolicy
		scParameters[scParamFsType] = ext4FSType
		storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		var allowExpansion = true
		storageclass.AllowVolumeExpansion = &allowExpansion
		storageclass, err = client.StorageV1().StorageClasses().Update(ctx, storageclass, metav1.UpdateOptions{})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	ginkgo.By("Creating PVCs using the Storage Class")
	framework.Logf("VOLUME_OPS_SCALE is set to %v", volumeOpsScale)
	for i := 0; i < volumeOpsScale; i++ {
		framework.Logf("Creating pvc%v", i)
		pvclaims[i], err = fpv.CreatePVC(ctx, client, namespace,
			getPersistentVolumeClaimSpecWithStorageClass(namespace, diskSize, storageclass, nil, ""))
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}

	ginkgo.By("Waiting for all claims to be in bound state")
	persistentvolumes, err = fpv.WaitForPVClaimBoundPhase(ctx, client, pvclaims,
		2*framework.ClaimProvisionTimeout)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// TODO: Add logic to check for no orphan volumes.
	defer func() {
		for _, claim := range pvclaims {
			err := fpv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		ginkgo.By("Verify PVs, volumes are deleted from CNS")
		for _, pv := range persistentvolumes {
			err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
				framework.PodDeleteTimeout)
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
			volumeID := pv.Spec.CSI.VolumeHandle
			err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
			gomega.Expect(err).NotTo(gomega.HaveOccurred(),
				fmt.Sprintf("Volume: %s should not be present in the "+
					"CNS after it is deleted from kubernetes", volumeID))
		}
	}()

	for _, pv := range persistentvolumes {
		volumeID := pv.Spec.CSI.VolumeHandle
		time.Sleep(30 * time.Second)

		ginkgo.By("Create CNS unregister volume with above created FCD " + pv.Spec.CSI.VolumeHandle)
		cnsUnRegisterVolume := getCNSUnRegisterVolumeSpec(ctx, namespace, volumeID)
		err = createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetCreated(ctx,
			restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))
		cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
		framework.Logf("CNS unregister volume name: %s", cnsUnRegisterVolumeName)
	}

	ginkgo.By("Verify PVs, volumes are deleted from CNS")
	for _, pv := range persistentvolumes {
		err := fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, framework.Poll,
			framework.PodDeleteTimeout)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		volumeID := pv.Spec.CSI.VolumeHandle
		err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeID)
		gomega.Expect(err).NotTo(gomega.HaveOccurred(),
			fmt.Sprintf("Volume: %s should not be present in the "+
				"CNS after it is deleted from kubernetes", volumeID))
	}

	defaultDatastore = getDefaultDatastore(ctx)
	ginkgo.By(fmt.Sprintf("defaultDatastore: %v", defaultDatastore))

	for _, pv1 := range persistentvolumes {
		ginkgo.By(fmt.Sprintf("Deleting FCD: %s", pv1.Spec.CSI.VolumeHandle))
		err = deleteFcdWithRetriesForSpecificErr(ctx, pv1.Spec.CSI.VolumeHandle, defaultDatastore.Reference(),
			[]string{disklibUnlinkErr}, []string{objOrItemNotFoundErr})
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
}
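In outline, the spec above provisions volumeOpsScale PVCs against a thick-provision storage policy, waits for them to bind, files one CnsUnregisterVolume CR per backing FCD, waits for each CR to report Status.Unregistered, verifies the PVs and CNS volumes are gone, and finally deletes the FCDs from the default datastore. The CR helpers it calls (getCNSUnRegisterVolumeSpec, createCNSUnRegisterVolume, and waitForCNSUnRegisterVolumeToGetCreated) are added to tests/e2e/util.go below.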

tests/e2e/util.go: +73 lines
@@ -88,6 +88,7 @@ import (
 	cnsfileaccessconfigv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsfileaccessconfig/v1alpha1"
 	cnsnodevmattachmentv1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsnodevmattachment/v1alpha1"
 	cnsregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsregistervolume/v1alpha1"
+	cnsunregistervolumev1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsunregistervolume/v1alpha1"
 	cnsvolumemetadatav1alpha1 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/cnsvolumemetadata/v1alpha1"
 	storagepolicyv1alpha2 "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator/storagepolicy/v1alpha2"
 	k8s "sigs.k8s.io/vsphere-csi-driver/v3/pkg/kubernetes"
@@ -3243,6 +3244,31 @@ func getCNSRegisterVolumeSpec(ctx context.Context, namespace string, fcdID strin
 	return cnsRegisterVolume
 }
 
+// Function to create a CnsUnregisterVolume spec with the given FCD ID.
+func getCNSUnRegisterVolumeSpec(ctx context.Context, namespace string,
+	fcdID string) *cnsunregistervolumev1alpha1.CnsUnregisterVolume {
+	var (
+		cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume
+	)
+	framework.Logf("get CNSUnRegisterVolume spec")
+
+	cnsUnRegisterVolume = &cnsunregistervolumev1alpha1.CnsUnregisterVolume{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "cnsunregvol-",
+			Namespace:    namespace,
+		},
+		Spec: cnsunregistervolumev1alpha1.CnsUnregisterVolumeSpec{
+			VolumeID: fcdID,
+		},
+	}
+
+	if fcdID != "" {
+		cnsUnRegisterVolume.Spec.VolumeID = fcdID
+	}
+	return cnsUnRegisterVolume
+}
+
 // Create CNS register volume.
 func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config,
 	cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) error {
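Taken together with the create and wait helpers below, the spec builder composes like this in the new test (a condensed sketch of lines already shown above; ctx, restConfig, namespace, and volumeID come from the test's own setup, and poll and supervisorClusterOperationsTimeout are existing e2e constants):

	// Build a CnsUnregisterVolume CR for the FCD backing a detached PV,
	// create it through the CNS operator client, then poll until the
	// operator reports Status.Unregistered = true.
	cnsUnRegisterVolume := getCNSUnRegisterVolumeSpec(ctx, namespace, volumeID)
	err := createCNSUnRegisterVolume(ctx, restConfig, cnsUnRegisterVolume)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	framework.ExpectNoError(waitForCNSUnRegisterVolumeToGetCreated(ctx,
		restConfig, namespace, cnsUnRegisterVolume, poll, supervisorClusterOperationsTimeout))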
@@ -3255,6 +3281,18 @@ func createCNSRegisterVolume(ctx context.Context, restConfig *rest.Config,
 	return err
 }
 
+// Create CNS Unregister volume.
+func createCNSUnRegisterVolume(ctx context.Context, restConfig *rest.Config,
+	cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) error {
+	cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restConfig, cnsoperatorv1alpha1.GroupName)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	framework.Logf("Create CNSUnRegisterVolume")
+	err = cnsOperatorClient.Create(ctx, cnsUnRegisterVolume)
+
+	return err
+}
+
 // Query CNS Register volume. Returns true if the CNSRegisterVolume is
 // available otherwise false.
 func queryCNSRegisterVolume(ctx context.Context, restClientConfig *rest.Config,
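Like createCNSRegisterVolume directly above it, the new helper talks to the CNS operator through a client scoped to the cnsoperator API group (k8s.NewClientForGroup) rather than through the core clientset, since CnsUnregisterVolume is a custom resource served by that group.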
@@ -3329,6 +3367,21 @@ func getCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config,
 	return cns
 }
 
+// Get CNS Unregister volume.
+func getCNSUnRegistervolume(ctx context.Context, restClientConfig *rest.Config,
+	cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume) *cnsunregistervolumev1alpha1.CnsUnregisterVolume {
+	cnsOperatorClient, err := k8s.NewClientForGroup(ctx, restClientConfig, cnsoperatorv1alpha1.GroupName)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	cns := &cnsunregistervolumev1alpha1.CnsUnregisterVolume{}
+	err = cnsOperatorClient.Get(ctx,
+		pkgtypes.NamespacedName{Name: cnsUnRegisterVolume.Name, Namespace: cnsUnRegisterVolume.Namespace}, cns)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	return cns
+}
+
 // Update CNS register volume.
 func updateCNSRegistervolume(ctx context.Context, restClientConfig *rest.Config,
 	cnsRegisterVolume *cnsregistervolumev1alpha1.CnsRegisterVolume) *cnsregistervolumev1alpha1.CnsRegisterVolume {
@@ -4051,6 +4104,26 @@ func waitForCNSRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest.
 	return fmt.Errorf("cnsRegisterVolume %s creation is failed within %v", cnsRegisterVolumeName, timeout)
 }
 
+// waitForCNSUnRegisterVolumeToGetCreated waits for a cnsUnRegisterVolume to get
+// created or until timeout occurs, whichever comes first.
+func waitForCNSUnRegisterVolumeToGetCreated(ctx context.Context, restConfig *rest.Config, namespace string,
+	cnsUnRegisterVolume *cnsunregistervolumev1alpha1.CnsUnregisterVolume, poll, timeout time.Duration) error {
+	cnsUnRegisterVolumeName := cnsUnRegisterVolume.GetName()
+	framework.Logf("Waiting up to %v for CnsUnRegisterVolume %s to get created", timeout, cnsUnRegisterVolumeName)
+
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+		cnsUnRegisterVolume = getCNSUnRegistervolume(ctx, restConfig, cnsUnRegisterVolume)
+		if cnsUnRegisterVolume.Status.Unregistered {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("cnsUnRegisterVolume %s was not unregistered within %v", cnsUnRegisterVolumeName, timeout)
+}
+
 // waitForCNSRegisterVolumeToGetDeleted waits for a cnsRegisterVolume to get
 // deleted or until timeout occurs, whichever comes first.
 func waitForCNSRegisterVolumeToGetDeleted(ctx context.Context, restConfig *rest.Config, namespace string,
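The wait helper mirrors waitForCNSRegisterVolumeToGetCreated, with the success condition swapped to the CR's Status.Unregistered flag; it re-reads the CR every poll interval until the flag turns true or the timeout elapses.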
