Skip to content

Commit ddbedaf

Browse files
authored
feat: attach sparse .vmdk when creating r/virtual_machine (#2383)
Added support for creating `r/virtual_machine` from an existing sparse `.vmdk`. Signed-off-by: Stoyan Zhelyazkov <[email protected]>
1 parent da04da6 commit ddbedaf

File tree

2 files changed

+136
-33
lines changed

2 files changed

+136
-33
lines changed

vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go

Lines changed: 63 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1436,10 +1436,48 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
14361436
attach = r.Get("attach").(bool)
14371437
}
14381438
// Save disk backing settings
1439-
b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
1440-
if !ok {
1439+
if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
1440+
if err := r.setFlatBackingProperties(b, disk, attach); err != nil {
1441+
return err
1442+
}
1443+
} else if b, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
1444+
if err := r.setSparseBackingProperties(b, disk, attach); err != nil {
1445+
return err
1446+
}
1447+
} else {
14411448
return fmt.Errorf("disk backing at %s is of an unsupported type (type %T)", r.Get("device_address").(string), disk.Backing)
14421449
}
1450+
1451+
if allocation := disk.StorageIOAllocation; allocation != nil {
1452+
r.Set("io_limit", allocation.Limit)
1453+
r.Set("io_reservation", allocation.Reservation)
1454+
if shares := allocation.Shares; shares != nil {
1455+
r.Set("io_share_level", string(shares.Level))
1456+
r.Set("io_share_count", shares.Shares)
1457+
}
1458+
}
1459+
1460+
if spbm.IsSupported(r.client) {
1461+
// Set storage policy if the VM exists.
1462+
vmUUID := r.rdd.Id()
1463+
if vmUUID != "" {
1464+
result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
1465+
if err != nil {
1466+
return err
1467+
}
1468+
polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
1469+
if err != nil {
1470+
return err
1471+
}
1472+
r.Set("storage_policy_id", polID)
1473+
}
1474+
}
1475+
1476+
log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
1477+
return nil
1478+
}
1479+
1480+
func (r *DiskSubresource) setFlatBackingProperties(b *types.VirtualDiskFlatVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
14431481
r.Set("uuid", b.Uuid)
14441482
r.Set("disk_mode", b.DiskMode)
14451483
r.Set("write_through", b.WriteThrough)
@@ -1469,29 +1507,24 @@ func (r *DiskSubresource) Read(l object.VirtualDeviceList) error {
14691507
r.Set("size", diskCapacityInGiB(disk))
14701508
}
14711509

1472-
if allocation := disk.StorageIOAllocation; allocation != nil {
1473-
r.Set("io_limit", allocation.Limit)
1474-
r.Set("io_reservation", allocation.Reservation)
1475-
if shares := allocation.Shares; shares != nil {
1476-
r.Set("io_share_level", string(shares.Level))
1477-
r.Set("io_share_count", shares.Shares)
1478-
}
1479-
}
1510+
return nil
1511+
}
14801512

1481-
if spbm.IsSupported(r.client) {
1482-
// Set storage policy if the VM exists.
1483-
vmUUID := r.rdd.Id()
1484-
if vmUUID != "" {
1485-
result, err := virtualmachine.MOIDForUUID(r.client, vmUUID)
1486-
if err != nil {
1487-
return err
1488-
}
1489-
polID, err := spbm.PolicyIDByVirtualDisk(r.client, result.MOID, r.Get("key").(int))
1490-
if err != nil {
1491-
return err
1492-
}
1493-
r.Set("storage_policy_id", polID)
1513+
func (r *DiskSubresource) setSparseBackingProperties(b *types.VirtualDiskSparseVer2BackingInfo, disk *types.VirtualDisk, attach bool) error {
1514+
r.Set("uuid", b.Uuid)
1515+
r.Set("disk_mode", b.DiskMode)
1516+
r.Set("write_through", b.WriteThrough)
1517+
1518+
r.Set("datastore_id", b.Datastore.Value)
1519+
1520+
// Disk settings
1521+
if !attach {
1522+
dp := &object.DatastorePath{}
1523+
if ok := dp.FromString(b.FileName); !ok {
1524+
return fmt.Errorf("could not parse path from filename: %s", b.FileName)
14941525
}
1526+
r.Set("path", dp.Path)
1527+
r.Set("size", diskCapacityInGiB(disk))
14951528
}
14961529

14971530
log.Printf("[DEBUG] %s: Read finished (key and device address may have changed)", r)
@@ -2364,14 +2397,15 @@ func diskUUIDMatch(device types.BaseVirtualDevice, uuid string) bool {
23642397
if !ok {
23652398
return false
23662399
}
2367-
backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
2368-
if !ok {
2369-
return false
2400+
2401+
if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
2402+
return backing.Uuid == uuid
23702403
}
2371-
if backing.Uuid != uuid {
2372-
return false
2404+
if backing, ok := disk.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok {
2405+
return backing.Uuid == uuid
23732406
}
2374-
return true
2407+
2408+
return false
23752409
}
23762410

23772411
// diskCapacityInGiB reports the supplied disk's capacity, by first checking

vsphere/resource_vsphere_virtual_machine_test.go

Lines changed: 73 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -100,8 +100,35 @@ func TestAccResourceVSphereVirtualMachine_hardwareVersionBare(t *testing.T) {
100100
})
101101
}
102102

103+
func TestAccResourceVSphereVirtualMachine_fromSparseVmdk(t *testing.T) {
104+
t.Skipf("requires an existing 2gbsparse vmdk")
105+
// to run this test you need to create a vmdk on the target datastore
106+
// 1. ssh to an ESXi host with connection to the datastore
107+
// 2. create an empty folder on the datastore and `cd` inside (e.g. /vmfs/volumes/67fe111f-489f3741-b381-02007873e8d0/sparsedisks)
108+
// 3. create a regular sparse vmdk - vmkfstools -c 2g -d sesparse sparse.vmdk
109+
// 4. create a 2gbsparse vmdk from the regular one - vmkfstools -i sparse.vmdk sparse2.vmdk -d 2gbsparse
110+
// the test is pre-configured to look for a disk at [acc-test-nfs] sparsedisks/sparse2.vmdk. change if necessary
111+
resource.Test(t, resource.TestCase{
112+
PreCheck: func() {
113+
RunSweepers()
114+
testAccPreCheck(t)
115+
},
116+
Providers: testAccProviders,
117+
CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
118+
Steps: []resource.TestStep{
119+
{
120+
Config: testAccResourceVSphereVirtualMachineConfigFromSparseVmdk(),
121+
Check: resource.ComposeTestCheckFunc(
122+
testAccResourceVSphereVirtualMachineCheckExists(true),
123+
resource.TestMatchResourceAttr("vsphere_virtual_machine.vm", "moid", regexp.MustCompile("^vm-")),
124+
),
125+
},
126+
},
127+
})
128+
}
129+
103130
func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
104-
t.Skipf("Requires key management server to run")
131+
t.Skipf("requires key management server to run")
105132
resource.Test(t, resource.TestCase{
106133
PreCheck: func() {
107134
RunSweepers()
@@ -122,7 +149,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmCreate(t *testing.T) {
122149
}
123150

124151
func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
125-
t.Skipf("Requires key management server to run")
152+
t.Skipf("requires key management server to run")
126153
resource.Test(t, resource.TestCase{
127154
PreCheck: func() {
128155
RunSweepers()
@@ -150,7 +177,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmAdd(t *testing.T) {
150177
}
151178

152179
func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
153-
t.Skipf("Requires key management server to run")
180+
t.Skipf("requires key management server to run")
154181
resource.Test(t, resource.TestCase{
155182
PreCheck: func() {
156183
RunSweepers()
@@ -178,7 +205,7 @@ func TestAccResourceVSphereVirtualMachine_vtpmRemove(t *testing.T) {
178205
}
179206

180207
func TestAccResourceVSphereVirtualMachine_vtpmClone(t *testing.T) {
181-
t.Skipf("Requires key management server to run")
208+
t.Skipf("requires key management server to run")
182209
resource.Test(t, resource.TestCase{
183210
PreCheck: func() {
184211
RunSweepers()
@@ -4145,6 +4172,48 @@ resource "vsphere_virtual_machine" "vm" {
41454172
)
41464173
}
41474174

4175+
func testAccResourceVSphereVirtualMachineConfigFromSparseVmdk() string {
4176+
return fmt.Sprintf(`
4177+
4178+
4179+
%s // Mix and match config
4180+
4181+
resource "vsphere_virtual_machine" "vm" {
4182+
name = "testacc-test"
4183+
resource_pool_id = vsphere_resource_pool.pool1.id
4184+
datastore_id = data.vsphere_datastore.rootds1.id
4185+
4186+
num_cpus = 2
4187+
memory = 2048
4188+
guest_id = "other3xLinuxGuest"
4189+
firmware = "efi"
4190+
4191+
wait_for_guest_net_timeout = 0
4192+
4193+
cdrom {
4194+
client_device = true
4195+
}
4196+
4197+
network_interface {
4198+
network_id = data.vsphere_network.network1.id
4199+
}
4200+
4201+
disk {
4202+
label = "disk0"
4203+
attach = true
4204+
datastore_id = data.vsphere_datastore.rootds1.id
4205+
path = "[acc-test-nfs] sparsedisks/sparse2.vmdk"
4206+
controller_type = "ide"
4207+
unit_number = 1
4208+
io_reservation = 1
4209+
}
4210+
}
4211+
`,
4212+
4213+
testAccResourceVSphereVirtualMachineConfigBase(),
4214+
)
4215+
}
4216+
41484217
func testAccResourceVSphereVirtualMachineConfigSharedSCSIBus() string {
41494218
return fmt.Sprintf(`
41504219

0 commit comments

Comments
 (0)