From f38f6db3ec732cfd920e2e51acde0e780f5546ee Mon Sep 17 00:00:00 2001
From: Kishen Viswanathan
Date: Sun, 23 Nov 2025 13:57:35 +0530
Subject: [PATCH] Pass on volume details when disk is in available state

---
 pkg/cloud/cloud_interface.go  |  2 +-
 pkg/cloud/mocks/mock_cloud.go |  7 ++--
 pkg/cloud/powervs.go          | 25 +++++++----
 pkg/driver/controller.go      | 20 ++++++---
 pkg/driver/controller_test.go | 78 +++++++++++++++++++++++++++++------
 pkg/driver/sanity_test.go     |  6 +--
 tests/e2e/pre_provisioning.go |  2 +-
 7 files changed, 106 insertions(+), 34 deletions(-)

diff --git a/pkg/cloud/cloud_interface.go b/pkg/cloud/cloud_interface.go
index bb60570d7..794e817fb 100644
--- a/pkg/cloud/cloud_interface.go
+++ b/pkg/cloud/cloud_interface.go
@@ -27,7 +27,7 @@ type Cloud interface {
     DetachDisk(volumeID string, nodeID string) (err error)
     ResizeDisk(volumeID string, reqSize int64) (newSize int64, err error)
     CloneDisk(sourceVolumeName string, cloneVolumeName string) (disk *Disk, err error)
-    WaitForVolumeState(volumeID, state string) error
+    WaitForVolumeState(volumeID, state string) (*models.Volume, error)
     WaitForCloneStatus(taskId string) error
     GetDiskByName(name string) (disk *Disk, err error)
     GetDiskByNamePrefix(namePrefix string) (disk *Disk, err error)
diff --git a/pkg/cloud/mocks/mock_cloud.go b/pkg/cloud/mocks/mock_cloud.go
index 19956b727..f859f0363 100644
--- a/pkg/cloud/mocks/mock_cloud.go
+++ b/pkg/cloud/mocks/mock_cloud.go
@@ -245,11 +245,12 @@ func (mr *MockCloudMockRecorder) WaitForCloneStatus(taskId any) *gomock.Call {
 }
 
 // WaitForVolumeState mocks base method.
-func (m *MockCloud) WaitForVolumeState(volumeID, state string) error {
+func (m *MockCloud) WaitForVolumeState(volumeID, state string) (*models.Volume, error) {
     m.ctrl.T.Helper()
     ret := m.ctrl.Call(m, "WaitForVolumeState", volumeID, state)
-    ret0, _ := ret[0].(error)
-    return ret0
+    ret0, _ := ret[0].(*models.Volume)
+    ret1, _ := ret[1].(error)
+    return ret0, ret1
 }
 
 // WaitForVolumeState indicates an expected call of WaitForVolumeState.
diff --git a/pkg/cloud/powervs.go b/pkg/cloud/powervs.go
index 6b5dd87e7..1ac7361ae 100644
--- a/pkg/cloud/powervs.go
+++ b/pkg/cloud/powervs.go
@@ -128,7 +128,7 @@ func (p *powerVSCloud) CreateDisk(volumeName string, diskOptions *DiskOptions) (
     if err != nil {
         return nil, err
     }
-    err = p.WaitForVolumeState(*v.VolumeID, VolumeAvailableState)
+    v, err = p.WaitForVolumeState(*v.VolumeID, VolumeAvailableState)
     if err != nil {
         return nil, err
     }
@@ -155,14 +155,16 @@ func (p *powerVSCloud) AttachDisk(volumeID string, nodeID string) (err error) {
     if err = p.volClient.Attach(nodeID, volumeID); err != nil {
         return err
     }
-    return p.WaitForVolumeState(volumeID, VolumeInUseState)
+    _, err = p.WaitForVolumeState(volumeID, VolumeInUseState)
+    return err
 }
 
 func (p *powerVSCloud) DetachDisk(volumeID string, nodeID string) (err error) {
     if err = p.volClient.Detach(nodeID, volumeID); err != nil {
         return err
     }
-    return p.WaitForVolumeState(volumeID, VolumeAvailableState)
+    _, err = p.WaitForVolumeState(volumeID, VolumeAvailableState)
+    return err
 }
 
 // IsAttached returns an error if a volume isn't attached to a node, else nil.
@@ -216,24 +218,27 @@ func (p *powerVSCloud) CloneDisk(sourceVolumeID string, cloneVolumeName string)
         return nil, errors.New("cloned volume not found")
     }
     clonedVolumeID := clonedVolumeDetails.ClonedVolumes[0].ClonedVolumeID
-    err = p.WaitForVolumeState(clonedVolumeID, VolumeAvailableState)
+    _, err = p.WaitForVolumeState(clonedVolumeID, VolumeAvailableState)
     if err != nil {
         return nil, err
     }
     return p.GetDiskByID(clonedVolumeID)
 }
 
-func (p *powerVSCloud) WaitForVolumeState(volumeID, state string) error {
+func (p *powerVSCloud) WaitForVolumeState(volumeID, state string) (*models.Volume, error) {
     ctx := context.Background()
+    var vol *models.Volume
+    var err error
     klog.V(4).Infof("Waiting for volume %s to be in %q state", volumeID, state)
-    return wait.PollUntilContextTimeout(ctx, PollInterval, PollTimeout, true, func(ctx context.Context) (bool, error) {
-        v, err := p.volClient.Get(volumeID)
+    err = wait.PollUntilContextTimeout(ctx, PollInterval, PollTimeout, true, func(ctx context.Context) (bool, error) {
+        vol, err = p.volClient.Get(volumeID)
         if err != nil {
             return false, err
         }
-        spew.Dump(v)
-        return v.State == state, nil
+        spew.Dump(vol)
+        return vol.State == state, nil
     })
+    return vol, err
 }
 
 func (p *powerVSCloud) WaitForCloneStatus(cloneTaskId string) error {
@@ -249,12 +254,14 @@ func (p *powerVSCloud) WaitForCloneStatus(cloneTaskId string) error {
 }
 
 func (p *powerVSCloud) GetDiskByName(name string) (disk *Disk, err error) {
+    klog.Infof("Looking for volume by name: %s", name)
     vols, err := p.volClient.GetAll()
     if err != nil {
         return nil, err
     }
     for _, v := range vols.Volumes {
         if name == *v.Name {
+            klog.Infof("Found volume by name: %s", *v.Name)
             return &Disk{
                 Name:     *v.Name,
                 DiskType: *v.DiskType,
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index aa676c153..8039bac10 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -18,6 +18,7 @@ package driver
 
 import (
     "context"
+    "errors"
     "fmt"
     "os"
     "strconv"
@@ -182,7 +183,7 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
     }
     // Check if the disk already exists
     // Disk exists only if previous createVolume request fails due to any network/tcp error
-    disk, _ := d.cloud.GetDiskByName(volName)
+    disk, err := d.cloud.GetDiskByName(volName)
     if disk != nil {
         // wait for volume to be available as the volume already exists
         klog.V(3).Infof("CreateVolume: Found an existing volume %s in %q state.", volName, disk.State)
@@ -191,15 +192,24 @@ func (d *controllerService) CreateVolume(ctx context.Context, req *csi.CreateVol
             return nil, err
         }
         if disk.State != cloud.VolumeAvailableState {
-            err = d.cloud.WaitForVolumeState(disk.VolumeID, cloud.VolumeAvailableState)
+            vol, err := d.cloud.WaitForVolumeState(disk.VolumeID, cloud.VolumeAvailableState)
             if err != nil {
                 return nil, status.Errorf(codes.Internal, "Disk exists, but not in required state. Current:%s Required:%s", disk.State, cloud.VolumeAvailableState)
             }
+            // When the disk is still in the "Creating" state, the WWN will not be available.
+            // In that case, once the volume becomes available, assign its WWN to the disk if not already set.
+            if disk.WWN == "" {
+                disk.WWN = vol.Wwn
+            }
         }
     } else {
-        disk, err = d.cloud.CreateDisk(volName, opts)
-        if err != nil {
-            return nil, status.Errorf(codes.Internal, "Could not create volume %q: %v", volName, err)
+        if errors.Is(err, cloud.ErrNotFound) {
+            disk, err = d.cloud.CreateDisk(volName, opts)
+            if err != nil {
+                return nil, status.Errorf(codes.Internal, "Could not create volume %q: %v", volName, err)
+            }
+        } else {
+            return nil, status.Errorf(codes.Internal, "Could not find volume by name %q: %v", volName, err)
         }
     }
     klog.V(3).Infof("CreateVolume: created volume %s, took %s", volName, time.Since(start))
diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go
index 71e8fb9a8..e89818d50 100644
--- a/pkg/driver/controller_test.go
+++ b/pkg/driver/controller_test.go
@@ -27,6 +27,10 @@ import (
     "sigs.k8s.io/ibm-powervs-block-csi-driver/pkg/cloud"
     "sigs.k8s.io/ibm-powervs-block-csi-driver/pkg/cloud/mocks"
     "sigs.k8s.io/ibm-powervs-block-csi-driver/pkg/util"
+
+    "k8s.io/utils/ptr"
+
+    "github.com/IBM-Cloud/power-go-client/power/models"
 )
 
 const (
@@ -78,7 +82,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), gomock.Any()).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -96,6 +100,56 @@ func TestCreateVolume(t *testing.T) {
                 }
             },
         },
+        {
+            name: "create a standard volume with a resync triggered after successful creation",
+            testFunc: func(t *testing.T) {
+                req := &csi.CreateVolumeRequest{
+                    Name:               "random-vol-name",
+                    CapacityRange:      stdCapRange,
+                    VolumeCapabilities: stdVolCap,
+                    Parameters:         stdParams,
+                }
+
+                ctx := context.Background()
+
+                mockDisk := &cloud.Disk{
+                    VolumeID:    "a1b2c3-d4e5f6",
+                    CapacityGiB: util.BytesToGiB(stdVolSize),
+                    DiskType:    cloud.DefaultVolumeType,
+                }
+
+                mockCtl := gomock.NewController(t)
+                defer mockCtl.Finish()
+
+                mockCloud := mocks.NewMockCloud(mockCtl)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
+                mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), gomock.Any()).Return(mockDisk, nil)
+
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(&cloud.Disk{VolumeID: "a1b2c3-d4e5f6", Name: req.Name, State: "creating", DiskType: "tier1", CapacityGiB: 5}, nil)
+                mockCloud.EXPECT().WaitForVolumeState("a1b2c3-d4e5f6", "available").Return(&models.Volume{State: "available"}, nil)
+
+                powervsDriver := controllerService{
+                    cloud:         mockCloud,
+                    driverOptions: &Options{},
+                    volumeLocks:   util.NewVolumeLocks(),
+                }
+
+                if _, err := powervsDriver.CreateVolume(ctx, req); err != nil {
+                    srvErr, ok := status.FromError(err)
+                    if !ok {
+                        t.Fatalf("Could not get error status code from error: %v", srvErr)
+                    }
+                    t.Fatalf("Unexpected error: %v", srvErr)
+                }
+                if _, err := powervsDriver.CreateVolume(ctx, req); err != nil {
+                    srvErr, ok := status.FromError(err)
+                    if !ok {
+                        t.Fatalf("Could not get error status code from error: %v", srvErr)
+                    }
+                    t.Fatalf("Unexpected error: %v", srvErr)
+                }
+            },
+        },
         {
             name: "success normal with datasource PVC",
             testFunc: func(t *testing.T) {
@@ -231,7 +285,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -286,7 +340,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -341,7 +395,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -429,7 +483,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), gomock.Any()).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -448,7 +502,7 @@ func TestCreateVolume(t *testing.T) {
                 // Subsequent call returns the created disk
                 mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(mockDisk, nil)
-                mockCloud.EXPECT().WaitForVolumeState(gomock.Any(), gomock.Any()).Return(nil)
+                mockCloud.EXPECT().WaitForVolumeState(gomock.Any(), gomock.Any()).Return(&models.Volume{VolumeID: ptr.To("a1b2c3-d4e5f6"), VolumeType: "tier3"}, nil)
 
                 resp, err := powervsDriver.CreateVolume(ctx, extraReq)
                 if err != nil {
                     srvErr, ok := status.FromError(err)
@@ -511,7 +565,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), gomock.Any()).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -573,7 +627,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), gomock.Any()).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -632,7 +686,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -681,7 +735,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -730,7 +784,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
@@ -818,7 +872,7 @@ func TestCreateVolume(t *testing.T) {
                 defer mockCtl.Finish()
 
                 mockCloud := mocks.NewMockCloud(mockCtl)
-                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, nil)
+                mockCloud.EXPECT().GetDiskByName(gomock.Eq(req.Name)).Return(nil, cloud.ErrNotFound)
                 mockCloud.EXPECT().CreateDisk(gomock.Eq(req.Name), mockDiskOpts).Return(mockDisk, nil)
 
                 powervsDriver := controllerService{
diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go
index 228fce874..b4e4d899b 100644
--- a/pkg/driver/sanity_test.go
+++ b/pkg/driver/sanity_test.go
@@ -206,8 +206,8 @@ func (c *fakeCloudProvider) IsAttached(volumeID string, nodeID string) (err erro
     return nil
 }
 
-func (c *fakeCloudProvider) WaitForVolumeState(volumeID, expectedState string) error {
-    return nil
+func (c *fakeCloudProvider) WaitForVolumeState(volumeID, expectedState string) (*models.Volume, error) {
+    return &models.Volume{VolumeID: ptr.To("a1b2c3-d4e5f6")}, nil
 }
 
 func (c *fakeCloudProvider) WaitForCloneStatus(cloneTaskId string) error {
@@ -224,7 +224,7 @@ func (c *fakeCloudProvider) GetDiskByName(name string) (*cloud.Disk, error) {
     } else if len(disks) == 1 {
         return disks[0].Disk, nil
     }
-    return nil, nil
+    return nil, cloud.ErrNotFound
 }
 
 func (c *fakeCloudProvider) GetDiskByNamePrefix(namePrefix string) (*cloud.Disk, error) {
diff --git a/tests/e2e/pre_provisioning.go b/tests/e2e/pre_provisioning.go
index 79a2c161f..8325e9ff0 100644
--- a/tests/e2e/pre_provisioning.go
+++ b/tests/e2e/pre_provisioning.go
@@ -96,7 +96,7 @@ var _ = Describe("[powervs-csi-e2e]Pre-Provisioned", func() {
         AfterEach(func() {
             skipManuallyDeletingVolume = true
             if !skipManuallyDeletingVolume {
-                err := cloud.WaitForVolumeState(volumeID, "detached")
+                _, err := cloud.WaitForVolumeState(volumeID, "detached")
                 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Detach volume has failed. err: %v", err))
                 err = cloud.DeleteDisk(volumeID)
                 Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Disk deletion has failed. err: %v", err))
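
Reviewer note (illustrative only, not part of the patch): the sketch below condenses the patched CreateVolume flow so the two behavioral changes are easy to see: a volume is created only when the lookup reports a definite not-found (any other lookup error is surfaced instead of being swallowed), and WaitForVolumeState now returns the polled volume so the caller can backfill the WWN that is missing while a disk is still in the creating state. Every identifier here (provider, ensureVolume, errNotFound, stateAvailable, the volume/disk structs, and the fake) is a hypothetical stand-in for the driver's real types (cloud.Cloud, cloud.Disk, *models.Volume, cloud.ErrNotFound, cloud.VolumeAvailableState), not the actual API.

package main

import (
    "errors"
    "fmt"
)

// Hypothetical stand-ins for the driver's real identifiers.
var errNotFound = errors.New("volume not found") // stands in for cloud.ErrNotFound

const stateAvailable = "available" // stands in for cloud.VolumeAvailableState

type volume struct{ State, Wwn string } // stands in for *models.Volume

type disk struct{ VolumeID, State, WWN string } // stands in for cloud.Disk

// provider is the slice of the cloud interface this flow touches; note that
// WaitForVolumeState now returns the polled volume along with the error.
type provider interface {
    GetDiskByName(name string) (*disk, error)
    CreateDisk(name string) (*disk, error)
    WaitForVolumeState(volumeID, state string) (*volume, error)
}

// ensureVolume mirrors the patched CreateVolume control flow.
func ensureVolume(p provider, name string) (*disk, error) {
    d, err := p.GetDiskByName(name)
    if d != nil {
        if d.State != stateAvailable {
            v, werr := p.WaitForVolumeState(d.VolumeID, stateAvailable)
            if werr != nil {
                return nil, fmt.Errorf("disk exists but never reached %q: %w", stateAvailable, werr)
            }
            // A disk still in the creating state has no WWN yet; backfill it
            // from the volume details returned once the state settles.
            if d.WWN == "" {
                d.WWN = v.Wwn
            }
        }
        return d, nil
    }
    if errors.Is(err, errNotFound) {
        // Definite not-found: safe to create.
        return p.CreateDisk(name)
    }
    // Transient lookup failure: bail out rather than risk a duplicate volume.
    return nil, fmt.Errorf("could not look up volume %q: %v", name, err)
}

// fake is a trivial in-memory provider used only to exercise the flow.
type fake struct{ existing *disk }

func (f *fake) GetDiskByName(name string) (*disk, error) {
    if f.existing == nil {
        return nil, errNotFound
    }
    return f.existing, nil
}

func (f *fake) CreateDisk(name string) (*disk, error) {
    f.existing = &disk{VolumeID: "vol-1", State: "creating"}
    return f.existing, nil
}

func (f *fake) WaitForVolumeState(volumeID, state string) (*volume, error) {
    return &volume{State: state, Wwn: "fake-wwn"}, nil
}

func main() {
    p := &fake{}
    d, _ := ensureVolume(p, "demo") // first call: not found, so the disk is created
    d, _ = ensureVolume(p, "demo")  // retry: disk found in "creating", WWN backfilled
    fmt.Println(d.VolumeID, d.WWN)
}

Running the sketch prints the volume ID with the WWN filled in on the retry path, which is exactly the scenario exercised by the new "resync triggered after successful creation" test case above.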