From 011167dd894d46023d99ec7acdd3eefb6c238ce1 Mon Sep 17 00:00:00 2001
From: Stoyan Zhelyazkov
Date: Tue, 22 Apr 2025 17:42:29 +0300
Subject: [PATCH 1/2] support attaching existing sparse vmdk when creating VM

Signed-off-by: Stoyan Zhelyazkov
---
 .../virtual_machine_disk_subresource.go       | 92 +++++++++++++------
 1 file changed, 63 insertions(+), 29 deletions(-)

diff --git a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
index 7cc9ff32f..785baf48b 100644
--- a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
+++ b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -1436,10 +1436,48 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
 		attach = r.Get("attach").(bool)
 	}
 	// Save disk backing settings
-	b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
-	if !ok {
+	if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
+		if err := r.setFlatBackingProperties(b, disk, attach); err != nil {
+			return err
+		}
+	} else if b, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
+		if err := r.setSparseBackingProperties(b, disk, attach); err != nil {
+			return err
+		}
+	} else {
 		return fmt.Errorf("disk backing at %s is of an unsupported type (type %T)", r.Get("device_address").(string), disk.Backing)
 	}
+
+	if allocation := disk.StorageIOAllocation; allocation != nil {
+		r.Set("io_limit", allocation.Limit)
+		r.Set("io_reservation", allocation.Reservation)
+		if shares := allocation.Shares; shares != nil {
+			r.Set("io_share_level", string(shares.Level))
+			r.Set("io_share_count", shares.Shares)
+		}
+	}
+
+	if spbm.IsSupported(r.client) {
+		// Set storage policy if the VM exists.
+		vmUUID := r.rdd.Id()
+		if vmUUID != "" {
+			result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
+			if err != nil {
+				return err
+			}
+			polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
+			if err != nil {
+				return err
+			}
+			r.Set("storage_policy_id", polID)
+		}
+	}
+
+	log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
+	return nil
+}
+
+func (r *DiskSubresource) setFlatBackingProperties(b *types.VirtualDiskFlatVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
 	r.Set("uuid", b.Uuid)
 	r.Set("disk_mode", b.DiskMode)
 	r.Set("write_through", b.WriteThrough)
@@ -1469,29 +1507,24 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
 		r.Set("size", diskCapacityInGiB(disk))
 	}
 
-	if allocation := disk.StorageIOAllocation; allocation != nil {
-		r.Set("io_limit", allocation.Limit)
-		r.Set("io_reservation", allocation.Reservation)
-		if shares := allocation.Shares; shares != nil {
-			r.Set("io_share_level", string(shares.Level))
-			r.Set("io_share_count", shares.Shares)
-		}
-	}
+	return nil
+}
 
-	if spbm.IsSupported(r.client) {
-		// Set storage policy if the VM exists.
-		vmUUID := r.rdd.Id()
-		if vmUUID != "" {
-			result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
-			if err != nil {
-				return err
-			}
-			polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
-			if err != nil {
-				return err
-			}
-			r.Set("storage_policy_id", polID)
+func (r *DiskSubresource) setSparseBackingProperties(b *types.VirtualDiskSparseVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
+	r.Set("uuid", b.Uuid)
+	r.Set("disk_mode", b.DiskMode)
+	r.Set("write_through", b.WriteThrough)
+
+	r.Set("datastore_id", b.Datastore.Value)
+
+	// Disk settings
+	if !attach {
+		dp := &object.DatastorePath{}
+		if ok := dp.FromString(b.FileName); !ok {
+			return fmt.Errorf("could not parse path from filename: %s", b.FileName)
 		}
+		r.Set("path", dp.Path)
+		r.Set("size", diskCapacityInGiB(disk))
 	}
 
 	log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
@@ -2364,14 +2397,15 @@ func diskUUIDMatch(device types.BaseVirtualDevice, uuid string) bool {
 	if !ok {
 		return false
 	}
-	backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
-	if !ok {
-		return false
+
+	if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
+		return backing.Uuid == uuid
 	}
-	if backing.Uuid != uuid {
-		return false
+	if backing, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
+		return backing.Uuid == uuid
 	}
-	return true
+
+	return false
 }
 
 // diskCapacityInGiB reports the supplied disk's capacity, by first checking

From ec484587d0a014d9233f86bfc89ca08c0088f86a Mon Sep 17 00:00:00 2001
From: Stoyan Zhelyazkov
Date: Mon, 2 Jun 2025 13:41:31 +0300
Subject: [PATCH 2/2] feat: add acc test for sparse disk

Signed-off-by: Stoyan Zhelyazkov
---
 .../resource_vsphere_virtual_machine_test.go  | 77 ++++++++++++++++++-
 1 file changed, 73 insertions(+), 4 deletions(-)

diff --git a/vsphere/resource_vsphere_virtual_machine_test.go b/vsphere/resource_vsphere_virtual_machine_test.go
index e4b22bc3e..573faeecb 100644
--- a/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/vsphere/resource_vsphere_virtual_machine_test.go
@@ -100,8 +100,35 @@ func TestAccResourceVSphereVirtualMachine_hardwareVersionBare(t *testing.T) {
 	})
 }
 
+func TestAccResourceVSphereVirtualMachine_fromSparseVmdk(t *testing.T) {
+	t.Skipf("requires an existing 2gbsparse vmdk")
+	// To run this test, first create a VMDK on the target datastore:
+	// 1. SSH to an ESXi host with a connection to the datastore.
+	// 2. Create an empty folder on the datastore and `cd` into it (e.g. /vmfs/volumes/67fe111f-489f3741-b381-02007873e8d0/sparsedisks).
+	// 3. Create a regular sparse VMDK: vmkfstools -c 2g -d sesparse sparse.vmdk
+	// 4. Create a 2gbsparse VMDK from the regular one: vmkfstools -i sparse.vmdk sparse2.vmdk -d 2gbsparse
+	// The test is pre-configured to look for the disk at [acc-test-nfs] sparsedisks/sparse2.vmdk; change the path if necessary.
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			RunSweepers()
+			testAccPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccResourceVSphereVirtualMachineConfigFromSparseVmdk(),
+				Check: resource.ComposeTestCheckFunc(
+					testAccResourceVSphereVirtualMachineCheckExists(true),
+					resource.TestMatchResourceAttr("vsphere_virtual_machine.vm", "moid", regexp.MustCompile("^vm-")),
+				),
+			},
+		},
+	})
+}
+
 func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
-	t.Skipf("Requires key management server to run")
+	t.Skipf("requires key management server to run")
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
 			RunSweepers()
@@ -122,7 +149,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
 }
 
 func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
-	t.Skipf("Requires key management server to run")
+	t.Skipf("requires key management server to run")
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
 			RunSweepers()
@@ -150,7 +177,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
 }
 
 func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
-	t.Skipf("Requires key management server to run")
+	t.Skipf("requires key management server to run")
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
 			RunSweepers()
@@ -178,7 +205,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
 }
 
 func TestAccResourceVSphereVirtualMachine_vtpmClone(t *testing.T) {
-	t.Skipf("Requires key management server to run")
+	t.Skipf("requires key management server to run")
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
 			RunSweepers()
@@ -4145,6 +4172,48 @@ resource "vsphere_virtual_machine" "vm" {
 	)
 }
 
+func testAccResourceVSphereVirtualMachineConfigFromSparseVmdk() string {
+	return fmt.Sprintf(`
+
+
+%s // Mix and match config
+
+resource "vsphere_virtual_machine" "vm" {
+  name             = "testacc-test"
+  resource_pool_id = vsphere_resource_pool.pool1.id
+  datastore_id     = data.vsphere_datastore.rootds1.id
+
+  num_cpus = 2
+  memory   = 2048
+  guest_id = "other3xLinuxGuest"
+  firmware = "efi"
+
+  wait_for_guest_net_timeout = 0
+
+  cdrom {
+    client_device = true
+  }
+
+  network_interface {
+    network_id = data.vsphere_network.network1.id
+  }
+
+  disk {
+    label           = "disk0"
+    attach          = true
+    datastore_id    = data.vsphere_datastore.rootds1.id
+    path            = "[acc-test-nfs] sparsedisks/sparse2.vmdk"
+    controller_type = "ide"
+    unit_number     = 1
+    io_reservation  = 1
+  }
+}
+`,
+
+		testAccResourceVSphereVirtualMachineConfigBase(),
+	)
+}
+
 func testAccResourceVSphereVirtualMachineConfigSharedSCSIBus() string {
 	return fmt.Sprintf(`