feat: attach sparse .vmdk when creating r/virtual_machine #2383

Merged · 2 commits · Jun 2, 2025
@@ -1436,10 +1436,48 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
attach = r.Get("attach").(bool)
}
// Save disk backing settings
b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
if !ok {
if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
if err := r.setFlatBackingProperties(b, disk, attach); err != nil {
return err
}
} else if b, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
if err := r.setSparseBackingProperties(b, disk, attach); err != nil {
return err
}
} else {
return fmt.Errorf("disk backing at %s is of an unsupported type (type %T)", r.Get("device_address").(string), disk.Backing)
}

if allocation := disk.StorageIOAllocation; allocation != nil {
r.Set("io_limit", allocation.Limit)
r.Set("io_reservation", allocation.Reservation)
if shares := allocation.Shares; shares != nil {
r.Set("io_share_level", string(shares.Level))
r.Set("io_share_count", shares.Shares)
}
}

if spbm.IsSupported(r.client) {
// Set storage policy if the VM exists.
vmUUID := r.rdd.Id()
if vmUUID != "" {
result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
if err != nil {
return err
}
polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
if err != nil {
return err
}
r.Set("storage_policy_id", polID)
}
}

log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
return nil
}

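// setFlatBackingProperties saves the properties of a flat
// (VirtualDiskFlatVer2) disk backing to the subresource.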
func (r *DiskSubresource) setFlatBackingProperties(b *types.VirtualDiskFlatVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
r.Set("uuid", b.Uuid)
r.Set("disk_mode", b.DiskMode)
r.Set("write_through", b.WriteThrough)
@@ -1469,29 +1507,24 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
r.Set("size", diskCapacityInGiB(disk))
}

if allocation := disk.StorageIOAllocation; allocation != nil {
r.Set("io_limit", allocation.Limit)
r.Set("io_reservation", allocation.Reservation)
if shares := allocation.Shares; shares != nil {
r.Set("io_share_level", string(shares.Level))
r.Set("io_share_count", shares.Shares)
}
}
return nil
}

if spbm.IsSupported(r.client) {
// Set storage policy if the VM exists.
vmUUID := r.rdd.Id()
if vmUUID != "" {
result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
if err != nil {
return err
}
polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
if err != nil {
return err
}
r.Set("storage_policy_id", polID)
func (r *DiskSubresource) setSparseBackingProperties(b *types.VirtualDiskSparseVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
r.Set("uuid", b.Uuid)
r.Set("disk_mode", b.DiskMode)
r.Set("write_through", b.WriteThrough)

r.Set("datastore_id", b.Datastore.Value)

// Disk settings
if !attach {
dp := &object.DatastorePath{}
if ok := dp.FromString(b.FileName); !ok {
return fmt.Errorf("could not parse path from filename: %s", b.FileName)
}
r.Set("path", dp.Path)
r.Set("size", diskCapacityInGiB(disk))
}

log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
Expand Down Expand Up @@ -2364,14 +2397,15 @@ func diskUUIDMatch(device types.BaseVirtualDevice, uuid string) bool {
if !ok {
return false
}
backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
if !ok {
return false

if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
return backing.Uuid == uuid
}
if backing.Uuid != uuid {
return false
if backing, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
return backing.Uuid == uuid
}
return true

return false
}

// diskCapacityInGiB reports the supplied disk's capacity, by first checking
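The flat/sparse dispatch added to Read, and the matching pair of assertions in diskUUIDMatch, could equally be expressed as a single Go type switch. A minimal sketch of the equivalent logic (the switch form is an editorial illustration, not the merged code):

	switch b := disk.Backing.(type) {
	case *types.VirtualDiskFlatVer2BackingInfo:
		// Flat (thin- or thick-provisioned) VMDK backing.
		if err := r.setFlatBackingProperties(b, disk, attach); err != nil {
			return err
		}
	case *types.VirtualDiskSparseVer2BackingInfo:
		// Sparse VMDK backing, e.g. a 2gbsparse disk.
		if err := r.setSparseBackingProperties(b, disk, attach); err != nil {
			return err
		}
	default:
		return fmt.Errorf("disk backing at %s is of an unsupported type (type %T)", r.Get("device_address").(string), disk.Backing)
	}

The same pattern would collapse the two assertions in diskUUIDMatch into one switch whose flat and sparse cases each return backing.Uuid == uuid.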
77 changes: 73 additions & 4 deletions vsphere/resource_vsphere_virtual_machine_test.go
@@ -100,8 +100,35 @@ func TestAccResourceVSphereVirtualMachine_hardwareVersionBare(t *testing.T) {
})
}

func TestAccResourceVSphereVirtualMachine_fromSparseVmdk(t *testing.T) {
t.Skipf("requires an existing 2gbsparse vmdk")
// To run this test you must first create a 2gbsparse vmdk on the target datastore:
// 1. SSH to an ESXi host with a connection to the datastore.
// 2. Create an empty folder on the datastore and `cd` into it (e.g. /vmfs/volumes/67fe111f-489f3741-b381-02007873e8d0/sparsedisks).
// 3. Create a regular sparse vmdk: vmkfstools -c 2g -d sesparse sparse.vmdk
// 4. Create a 2gbsparse vmdk from the regular one: vmkfstools -i sparse.vmdk sparse2.vmdk -d 2gbsparse
// The test is pre-configured to look for the disk at [acc-test-nfs] sparsedisks/sparse2.vmdk; change the path if necessary.
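// For context: 2gbsparse is the hosted sparse disk format (extents capped at
// roughly 2 GB), and disks in this format surface through the
// VirtualDiskSparseVer2BackingInfo backing that this change adds support for.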
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
testAccPreCheck(t)
},
Providers: testAccProviders,
CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
Steps: []resource.TestStep{
{
Config: testAccResourceVSphereVirtualMachineConfigFromSparseVmdk(),
Check: resource.ComposeTestCheckFunc(
testAccResourceVSphereVirtualMachineCheckExists(true),
resource.TestMatchResourceAttr("vsphere_virtual_machine.vm", "moid", regexp.MustCompile("^vm-")),
),
},
},
})
}

func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
t.Skipf("Requires key management server to run")
t.Skipf("requires key management server to run")
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
@@ -122,7 +149,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
}

func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
t.Skipf("Requires key management server to run")
t.Skipf("requires key management server to run")
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
@@ -150,7 +177,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
}

func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
t.Skipf("Requires key management server to run")
t.Skipf("requires key management server to run")
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
@@ -178,7 +205,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
}

func TestAccResourceVSphereVirtualMachine_vtpmClone(t *testing.T) {
t.Skipf("Requires key management server to run")
t.Skipf("requires key management server to run")
resource.Test(t, resource.TestCase{
PreCheck: func() {
RunSweepers()
@@ -4145,6 +4172,48 @@ resource "vsphere_virtual_machine" "vm" {
)
}

func testAccResourceVSphereVirtualMachineConfigFromSparseVmdk() string {
return fmt.Sprintf(`


%s // Mix and match config

resource "vsphere_virtual_machine" "vm" {
name = "testacc-test"
resource_pool_id = vsphere_resource_pool.pool1.id
datastore_id = data.vsphere_datastore.rootds1.id

num_cpus = 2
memory = 2048
guest_id = "other3xLinuxGuest"
firmware = "efi"

wait_for_guest_net_timeout = 0

cdrom {
client_device = true
}

network_interface {
network_id = data.vsphere_network.network1.id
}

disk {
label = "disk0"
attach = true
datastore_id = data.vsphere_datastore.rootds1.id
path = "[acc-test-nfs] sparsedisks/sparse2.vmdk"
controller_type = "ide"
unit_number = 1
io_reservation = 1
}
}
`,

testAccResourceVSphereVirtualMachineConfigBase(),
)
}
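// Note: the disk in the config above is attached (attach = true) from an
// existing path rather than created, which is the sparse-vmdk scenario this
// change enables.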

func testAccResourceVSphereVirtualMachineConfigSharedSCSIBus() string {
return fmt.Sprintf(`
