-
Notifications
You must be signed in to change notification settings - Fork 68
Expand file tree
/
Copy pathvirt.py
More file actions
2892 lines (2495 loc) · 113 KB
/
virt.py
File metadata and controls
2892 lines (2495 loc) · 113 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from __future__ import annotations
import io
import ipaddress
import json
import logging
import os
import re
import secrets
import shlex
from collections import defaultdict
from contextlib import contextmanager
from functools import cache
from json import JSONDecodeError
from subprocess import run
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import bitmath
import jinja2
import pexpect
import yaml
from benedict import benedict
from kubernetes.client import ApiException
from kubernetes.dynamic import DynamicClient
from kubernetes.dynamic.exceptions import NotFoundError
from ocp_resources.daemonset import DaemonSet
from ocp_resources.datavolume import DataVolume
from ocp_resources.kubevirt import KubeVirt
from ocp_resources.namespace import Namespace
from ocp_resources.node import Node
from ocp_resources.pod import Pod
from ocp_resources.resource import Resource, ResourceEditor, get_client
from ocp_resources.service import Service
from ocp_resources.storage_profile import StorageProfile
from ocp_resources.template import Template
from ocp_resources.virtual_machine import VirtualMachine
from ocp_resources.virtual_machine_clone import VirtualMachineClone
from ocp_resources.virtual_machine_instance import VirtualMachineInstance
from ocp_resources.virtual_machine_instance_migration import (
VirtualMachineInstanceMigration,
)
from ocp_utilities.exceptions import CommandExecFailed
from paramiko import ProxyCommandFailure
from pyhelper_utils.shell import run_command, run_ssh_commands
from pytest_testconfig import config as py_config
from rrmngmnt import Host, ssh, user
from timeout_sampler import TimeoutExpiredError, TimeoutSampler
import utilities.cpu
import utilities.data_utils
import utilities.infra
from libs.net.cluster import is_ipv6_single_stack_cluster
from utilities.console import Console
from utilities.constants import (
CLOUD_INIT_DISK_NAME,
CLOUD_INIT_NO_CLOUD,
CNV_VM_SSH_KEY_PATH,
DATA_SOURCE_NAME,
DATA_SOURCE_NAMESPACE,
DEFAULT_KUBEVIRT_CONDITIONS,
DV_DISK,
EVICTIONSTRATEGY,
IP_FAMILY_POLICY_PREFER_DUAL_STACK,
LINUX_AMD_64,
LINUX_STR,
MULTIARCH,
OS_FLAVOR_ALPINE,
OS_FLAVOR_CIRROS,
OS_FLAVOR_FEDORA,
OS_FLAVOR_WINDOWS,
OS_PROC_NAME,
ROOTDISK,
SSH_PORT_22,
TCP_TIMEOUT_30SEC,
TIMEOUT_1MIN,
TIMEOUT_1SEC,
TIMEOUT_2MIN,
TIMEOUT_3MIN,
TIMEOUT_4MIN,
TIMEOUT_5MIN,
TIMEOUT_5SEC,
TIMEOUT_6MIN,
TIMEOUT_8MIN,
TIMEOUT_10MIN,
TIMEOUT_10SEC,
TIMEOUT_12MIN,
TIMEOUT_25MIN,
TIMEOUT_30MIN,
TIMEOUT_30SEC,
VIRT_HANDLER,
VIRT_LAUNCHER,
VIRTCTL,
ArchImages,
Images,
)
from utilities.data_collector import collect_vnc_screenshot_for_vms
from utilities.hco import get_hco_namespace, wait_for_hco_conditions
from utilities.network import (
cloud_init_network_data,
)
from utilities.storage import get_default_storage_class
if TYPE_CHECKING:
from libs.vm.vm import BaseVirtualMachine
LOGGER = logging.getLogger(__name__)
# Taint key Kubernetes places on cordoned/unschedulable nodes.
K8S_TAINT = "node.kubernetes.io/unschedulable"
NO_SCHEDULE = "NoSchedule"
# Container-disk image used for lightweight CirrOS-based VMs.
CIRROS_IMAGE = "kubevirt/cirros-container-disk-demo:latest"
# OS flavors for which cloud-init configuration is never injected (see to_dict()).
FLAVORS_EXCLUDED_FROM_CLOUD_INIT = (OS_FLAVOR_WINDOWS, OS_FLAVOR_CIRROS, OS_FLAVOR_ALPINE)
# VM statuses that indicate the VM failed to schedule or start.
VM_ERROR_STATUSES = [
    VirtualMachine.Status.CRASH_LOOPBACK_OFF,
    VirtualMachine.Status.ERROR_UNSCHEDULABLE,
    VirtualMachine.Status.ERROR_PVC_NOT_FOUND,
    VirtualMachine.Status.IMAGE_PULL_BACK_OFF,
    VirtualMachine.Status.ERR_IMAGE_PULL,
]
def wait_for_vm_interfaces(vmi: VirtualMachineInstance, timeout: int = TIMEOUT_12MIN) -> bool:
    """
    Wait until the guest agent reports all VMI network interfaces.

    Args:
        vmi (VirtualMachineInstance): VMI object.
        timeout (int): Maximum time to wait for the interfaces report.

    Returns:
        bool: True once every reported interface carries an interfaceName.

    Raises:
        TimeoutExpiredError: After timeout is reached.
    """
    # The guest agent must be connected before its interface report is meaningful.
    LOGGER.info(f"Wait until guest agent is active on {vmi.name}")
    vmi.wait_for_condition(
        condition=VirtualMachineInstance.Condition.Type.AGENT_CONNECTED,
        status=VirtualMachineInstance.Condition.Status.TRUE,
        timeout=timeout,
    )
    LOGGER.info(f"Wait for {vmi.name} network interfaces")
    for snapshot in TimeoutSampler(wait_timeout=timeout, sleep=1, func=lambda: vmi.instance):
        reported_interfaces = snapshot.get("status", {}).get("interfaces", [])
        # Done when every interface entry has been resolved to a concrete interfaceName.
        if all(iface.get("interfaceName") for iface in reported_interfaces):
            return True
    return False
def generate_cloud_init_data(data):
    """
    Render each cloud-init section of *data* into its YAML string form.

    Args:
        data (dict): cloud-init data, keyed by section name (e.g. "userData", "networkData").

    Returns:
        dict: Mapping of section name to the generated cloud-init string.

    Example:
        data = {
            "networkData": {
                "version": 2,
                "ethernets": {
                    "eth0": {
                        "dhcp4": True,
                        "addresses": "[ fd10:0:2::2/120 ]",
                        "gateway6": "fd10:0:2::1",
                    }
                }
            }
        }
        with VirtualMachineForTests(
            namespace="namespace",
            name="vm",
            body=fedora_vm_body("vm"),
            cloud_init_data=data,
        ) as vm:
            pass
    """
    rendered = {}
    for section, section_data in data.items():
        dumped = yaml.dump(section_data, width=1000)
        # userData must start with the "#cloud-config" marker line.
        header = "#cloud-config\n" if section == "userData" else ""
        body = "".join(f"{line}\n" for line in dumped.splitlines())
        rendered[section] = header + body
    return rendered
def merge_dicts(source_dict, target_dict):
"""Merge nested source_dict into target_dict"""
for key, value in source_dict.items():
if isinstance(value, dict):
node = target_dict.setdefault(key, {})
merge_dicts(source_dict=value, target_dict=node)
else:
target_dict[key] = value
return target_dict
class VirtualMachineForTests(VirtualMachine):
    def __init__(
        self,
        name,
        namespace,
        eviction_strategy=None,
        body=None,
        client=None,
        interfaces=None,
        networks=None,
        node_selector=None,
        service_accounts=None,
        cpu_flags=None,
        cpu_limits=None,
        cpu_requests=None,
        cpu_sockets=None,
        cpu_cores=None,
        cpu_threads=None,
        cpu_model=None,
        cpu_max_sockets=None,
        memory_requests=None,
        memory_limits=None,
        memory_guest=None,
        memory_max_guest=None,
        cloud_init_data=None,
        machine_type=None,
        image=None,
        ssh=True,
        ssh_secret=None,
        network_model=None,
        network_multiqueue=None,
        pvc=None,
        data_volume=None,
        data_volume_template=None,
        teardown=True,
        cloud_init_type=None,
        attached_secret=None,
        cpu_placement=False,
        isolate_emulator_thread=False,
        iothreads_policy=None,
        dedicated_iothread=False,
        smm_enabled=None,
        pvspinlock_enabled=None,
        efi_params=None,
        diskless_vm=False,
        run_strategy=VirtualMachine.RunStrategy.HALTED,
        disk_io_options=None,
        username=None,
        password=None,
        macs=None,
        interfaces_types=None,
        os_flavor=OS_FLAVOR_FEDORA,
        host_device_name=None,
        gpu_name=None,
        vhostmd=False,
        vm_debug_logs=False,
        priority_class_name=None,
        dry_run=None,
        additional_labels=None,
        generate_unique_name=True,
        node_selector_labels=None,
        vm_instance_type=None,
        vm_instance_type_infer=False,
        vm_preference=None,
        vm_preference_infer=False,
        vm_validation_rule=None,
        termination_grace_period=None,
        disk_type="virtio",
        yaml_file=None,
        tpm_params=None,
        hugepages_page_size=None,
        vm_affinity=None,
        annotations=None,
    ):
        """
        Virtual machine creation

        Args:
            name (str): VM name
            namespace (str): Namespace name
            eviction_strategy (str, optional): valid options("None", "LiveMigrate", "LiveMigrateIfPossible", "External")
                Default value None here is same as Null and not the string "None" which is one of the valid options
            body (dict, optional): VM [metadata] and spec
            client (:obj:`DynamicClient`, optional): admin client or unprivileged client
            interfaces (list, optional): list of interfaces names
            networks (dict, optional)
            node_selector (dict, optional): Node name
            service_accounts (list, optional): list of service account names
            cpu_flags (str, optional)
            cpu_limits (quantity, optional): quantity supports string, ints, and floats
            cpu_requests (quantity, optional): quantity supports string, ints, and floats
            cpu_sockets (int, optional)
            cpu_cores (int, optional)
            cpu_threads (int, optional)
            cpu_model (str, optional)
            cpu_max_sockets (int, optional)
            memory_requests (str, optional)
            memory_limits (str, optional)
            memory_guest (str, optional)
            memory_max_guest (str, optional)
            cloud_init_data (dict, optional): cloud-init dict
            machine_type (str, optional)
            image (str, optional)
            ssh (bool, default: True): If True and using "with" (contextmanager) statement, create an SSH service
            ssh_secret (:obj:,`Secret`, optional): Needs cloud_init_type as cloudInitNoCloud
            network_model (str, optional)
            network_multiqueue (None/bool, optional, default: None): If not None, set to True/False
            pvc (:obj:`PersistentVolumeClaim`, optional)
            data_volume (:obj:`DataVolume`, optional)
            data_volume_template (dict, optional)
            teardown (bool, default: True)
            cloud_init_type (str, optional): cloud-init type, for example: cloudInitNoCloud, cloudInitConfigDrive
            attached_secret (dict, optional)
            cpu_placement (bool, default: False): If True, set dedicatedCpuPlacement = True
            isolate_emulator_thread (bool, default: False): If True, set isolateEmulatorThread = True.
                Need to explicitly also set cpu_placement = True, as dedicatedCpuPlacement should also be True.
            iothreads_policy (str, optional, default: None): If not None, set to auto/shared
            dedicated_iothread (bool, optional, default: False): If True, set dedicatedIOThread to True
            smm_enabled (None/bool, optional, default: None): If not None, set to True/False
            pvspinlock_enabled (bool, optional, default: None): If not None, set to True/False
            efi_params (dict, optional)
            diskless_vm (bool, default: False): If True, remove VM disks
            run_strategy (str, default: "Halted"): Set runStrategy
            disk_io_options (str, optional): Set root disk IO
            username (str, optional): SSH username
            password (str, optional): SSH password
            macs (dict, optional): Dict of {interface_name: mac address}
            interfaces_types (dict, optional): Dict of interfaces names and type ({"iface1": "sriov"})
            os_flavor (str, default: fedora): OS flavor to get SSH login parameters.
                (flavor should be exist in constants.py)
            host_device_name (str, optional): PCI Host Device Name (For Example: "nvidia.com/GV100GL_Tesla_V100")
            gpu_name (str, optional): GPU Device Name (For Example: "nvidia.com/GV100GL_Tesla_V100")
            vhostmd (bool, optional, default: False): If True, configure vhostmd.
            vm_debug_logs(bool, default=False): if True, add 'debugLogs' label to VM to
                enable libvirt debug logs in the virt-launcher pod.
                Is set to True if py_config["data_collector"] is True.
            priority_class_name (str, optional): The name of the priority class used for the VM
            dry_run (str, default=None): If "All", the resource will be created using the dry_run flag
            additional_labels (dict, optional): Dict of additional labels for VM (e.g. {"vm-label": "best-vm"})
            generate_unique_name: if True then it will set dynamic name for the vm, False will use the name of vm passed
            node_selector_labels (str, optional): Labels for node selector.
            vm_instance_type (VirtualMachineInstancetype, optional): instance type object for the VM
            vm_instance_type_infer (bool, optional): if True fetch the instance type from the VM volume
            vm_preference (VirtualMachinePreference, optional): preference object for the VM
            vm_preference_infer (bool, optional): if True fetch the preference from the VM volume
            vm_validation_rule (dict, optional): dict defining validation rule to be added to the VM
            termination_grace_period (int, optional): seconds to wait until VMI is force terminated after stopping
            disk_type (str, default: "virtio"): define disk type (e.g "virtio", "sata", None)
            tpm_params (dict, optional):
                {} - for tpm not persistent state (suitable for bypassing windows install tpm check)
                {persistent: true} - for persistent state
            hugepages_page_size (str, optional) defines the size of huge pages,Valid values are 2 Mi and 1 Gi
            vm_affinity (dict, optional): If affinity is specifies, obey all the affinity rules
            annotations (dict, optional): annotations to be added to the VM
        """
        # Sets VM unique name - replaces "." with "-" in the name to handle valid values.
        self.name = utilities.infra.unique_name(name=name) if generate_unique_name else name
        super().__init__(
            name=self.name,
            namespace=namespace,
            client=client,
            teardown=teardown,
            dry_run=dry_run,
            node_selector=node_selector,
            node_selector_labels=node_selector_labels,
            yaml_file=yaml_file,
        )
        # Remaining attributes are consumed lazily by to_dict() and its helpers.
        self.body = body
        self.interfaces = interfaces or []
        self.service_accounts = service_accounts or []
        self.networks = networks or {}
        self.node_selector = node_selector
        self.eviction_strategy = eviction_strategy
        self.cpu_flags = cpu_flags
        self.cpu_limits = cpu_limits
        self.cpu_requests = cpu_requests
        self.cpu_sockets = cpu_sockets
        self.cpu_cores = cpu_cores
        self.cpu_threads = cpu_threads
        self.cpu_model = cpu_model
        self.cpu_max_sockets = cpu_max_sockets
        self.memory_requests = memory_requests
        self.memory_limits = memory_limits
        self.memory_guest = memory_guest
        self.memory_max_guest = memory_max_guest
        self.cloud_init_data = cloud_init_data
        self.machine_type = machine_type
        self.image = image
        self.ssh = ssh
        self.ssh_secret = ssh_secret
        self.custom_service = None
        self.network_model = network_model
        self.network_multiqueue = network_multiqueue
        self.data_volume_template = data_volume_template
        self.cloud_init_type = cloud_init_type
        self.pvc = pvc
        self.attached_secret = attached_secret
        self.cpu_placement = cpu_placement
        self.isolate_emulator_thread = isolate_emulator_thread
        self.iothreads_policy = iothreads_policy
        self.dedicated_iothread = dedicated_iothread
        self.data_volume = data_volume
        self.smm_enabled = smm_enabled
        self.pvspinlock_enabled = pvspinlock_enabled
        self.efi_params = efi_params
        self.diskless_vm = diskless_vm
        self.is_vm_from_template = False
        self.run_strategy = run_strategy
        self.disk_io_options = disk_io_options
        self.username = username
        self.password = password
        self.macs = macs
        self.interfaces_types = interfaces_types or {}
        self.os_flavor = os_flavor
        self.host_device_name = host_device_name
        self.gpu_name = gpu_name
        self.vhostmd = vhostmd
        # Debug logs are forced on when the test run collects data.
        self.vm_debug_logs = vm_debug_logs or py_config.get("data_collector")
        self.priority_class_name = priority_class_name
        self.additional_labels = additional_labels
        self.node_selector_labels = node_selector_labels
        self.vm_instance_type = vm_instance_type
        self.vm_instance_type_infer = vm_instance_type_infer
        self.vm_preference = vm_preference
        self.vm_preference_infer = vm_preference_infer
        self.vm_validation_rule = vm_validation_rule
        self.termination_grace_period = termination_grace_period
        self.disk_type = disk_type
        self.tpm_params = tpm_params
        self.hugepages_page_size = hugepages_page_size
        self.vm_affinity = vm_affinity
        self.annotations = annotations
        # Must be here to apply on existing VMs
        self.set_login_params()
def deploy(self, wait=False):
super().deploy(wait=wait)
return self
def clean_up(self, wait: bool = True, timeout: int | None = None) -> bool:
if self.exists and self.ready:
self.stop(wait=True, vmi_delete_timeout=TIMEOUT_8MIN)
super().clean_up(wait=wait, timeout=timeout)
if self.custom_service:
self.custom_service.delete(wait=True)
return True
    def to_dict(self):
        """Build self.res for the VM.

        Populates the base resource dict, then applies every configured feature to
        spec.template.spec. NOTE: the helper call order below is significant
        (e.g. cloud-init disks must be added after DV disks); do not reorder casually.
        """
        super().to_dict()
        self.set_labels()
        self.set_rng_device()
        self.generate_body()
        self.set_run_strategy()
        self.set_instance_type()
        self.set_vm_preference()
        self.set_vm_validation_rule()
        self.is_vm_from_template = self._is_vm_from_template()
        template_spec = self.res["spec"]["template"]["spec"]
        if self.eviction_strategy:
            template_spec[EVICTIONSTRATEGY] = self.eviction_strategy
        # Each helper mutates template_spec in place and returns it.
        template_spec = self.set_hugepages_page_size(template_spec=template_spec)
        template_spec = self.update_node_selector(template_spec=template_spec)
        template_spec = self.update_vm_network_configuration(template_spec=template_spec)
        template_spec = self.update_vm_cpu_configuration(template_spec=template_spec)
        template_spec = self.update_vm_memory_configuration(template_spec=template_spec)
        template_spec = self.set_smm(template_spec=template_spec)
        template_spec = self.set_pvspinlock(template_spec=template_spec)
        template_spec = self.set_efi_params(template_spec=template_spec)
        template_spec = self.set_tpm_params(template_spec=template_spec)
        template_spec = self.set_machine_type(template_spec=template_spec)
        template_spec = self.set_iothreads_policy(template_spec=template_spec)
        template_spec = self.set_hostdevice(template_spec=template_spec)
        template_spec = self.set_gpu(template_spec=template_spec)
        template_spec = self.set_disk_io_configuration(template_spec=template_spec)
        template_spec = self.set_priority_class(template_spec=template_spec)
        template_spec = self.set_termination_grace_period(template_spec=template_spec)
        template_spec = self.set_vm_affinity_rule(template_spec=template_spec)
        # Either update storage and cloud-init configuration or remove disks from spec
        if self.diskless_vm:
            template_spec = self.set_diskless_vm(template_spec=template_spec)
        else:
            template_spec = self.update_vm_storage_configuration(template_spec=template_spec)
            template_spec = self.set_service_accounts(template_spec=template_spec)
            # cloud-init disks must be set after DV disks in order to boot from DV.
            template_spec = self.update_vm_cloud_init_data(template_spec=template_spec)
            template_spec = self.set_vhostmd(template_spec=template_spec)
            template_spec = self.update_vm_secret_configuration(template_spec=template_spec)
            # VMs do not necessarily have self.cloud_init_data
            # cloud-init will not be set for OS in FLAVORS_EXCLUDED_FROM_CLOUD_INIT
            if self.ssh and not any(flavor in self.os_flavor for flavor in FLAVORS_EXCLUDED_FROM_CLOUD_INIT):
                if self.ssh_secret is None:
                    template_spec = self.enable_ssh_in_cloud_init_data(template_spec=template_spec)
                if self.ssh_secret:
                    template_spec = self.update_vm_ssh_secret_configuration(template_spec=template_spec)
def set_hugepages_page_size(self, template_spec):
if self.hugepages_page_size:
template_spec.setdefault("domain", {}).setdefault("memory", {})["hugepages"] = {
"pageSize": self.hugepages_page_size
}
return template_spec
def update_node_selector(self, template_spec):
if self.node_selector_spec:
template_spec["nodeSelector"] = self.node_selector_spec
return template_spec
def set_disk_io_configuration(self, template_spec):
if self.disk_io_options or self.dedicated_iothread:
disks_spec = template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("disks", [])
for disk in disks_spec:
if disk["name"] == ROOTDISK:
if self.disk_io_options:
disk["io"] = self.disk_io_options
if self.dedicated_iothread:
disk["dedicatedIOThread"] = self.dedicated_iothread
break
template_spec["domain"]["devices"]["disks"] = disks_spec
return template_spec
def set_gpu(self, template_spec):
if self.gpu_name:
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("gpus", []).append({
"deviceName": self.gpu_name,
"name": "gpu",
})
return template_spec
def set_hostdevice(self, template_spec):
if self.host_device_name:
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("hostDevices", []).append({
"deviceName": self.host_device_name,
"name": "hostdevice",
})
return template_spec
def set_diskless_vm(self, template_spec):
template_spec.get("domain", {}).get("devices", {}).pop("disks", None)
# As of https://bugzilla.redhat.com/show_bug.cgi?id=1954667 <skip-bug-check>, it is not possible to create a VM
# with volume(s) without corresponding disks
template_spec.pop("volumes", None)
return template_spec
def set_machine_type(self, template_spec):
if self.machine_type:
template_spec.setdefault("domain", {}).setdefault("machine", {})["type"] = self.machine_type
return template_spec
def set_iothreads_policy(self, template_spec):
if self.iothreads_policy:
template_spec.setdefault("domain", {})["ioThreadsPolicy"] = self.iothreads_policy
return template_spec
def set_efi_params(self, template_spec):
if self.efi_params is not None:
template_spec.setdefault("domain", {}).setdefault("firmware", {}).setdefault("bootloader", {})["efi"] = (
self.efi_params
)
return template_spec
def set_tpm_params(self, template_spec):
if self.tpm_params is not None:
template_spec.setdefault("domain", {}).setdefault("devices", {})["tpm"] = self.tpm_params
return template_spec
def set_smm(self, template_spec):
if self.smm_enabled is not None:
template_spec.setdefault("domain", {}).setdefault("features", {}).setdefault("smm", {})["enabled"] = (
self.smm_enabled
)
return template_spec
def set_pvspinlock(self, template_spec):
if self.pvspinlock_enabled is not None:
template_spec.setdefault("domain", {}).setdefault("features", {}).setdefault("pvspinlock", {})[
"enabled"
] = self.pvspinlock_enabled
return template_spec
def set_priority_class(self, template_spec):
if self.priority_class_name:
template_spec["priorityClassName"] = self.priority_class_name
return template_spec
def set_termination_grace_period(self, template_spec):
if self.termination_grace_period:
template_spec["terminationGracePeriodSeconds"] = self.termination_grace_period
return template_spec
def set_rng_device(self):
# Create rng device so the vm will be able to use /dev/rnd without
# waiting for entropy collecting.
self.res.setdefault("spec", {}).setdefault("template", {}).setdefault("spec", {}).setdefault(
"domain", {}
).setdefault("devices", {}).setdefault("rng", {})
def set_service_accounts(self, template_spec):
for sa in self.service_accounts:
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("disks", []).append({
"disk": {},
"name": sa,
})
template_spec.setdefault("volumes", []).append({"name": sa, "serviceAccount": {"serviceAccountName": sa}})
return template_spec
def set_vhostmd(self, template_spec):
name = "vhostmd"
if self.vhostmd:
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("disks", []).append({
"disk": {"bus": self.disk_type},
"name": name,
})
template_spec.setdefault("volumes", []).append({"name": name, "downwardMetrics": {}})
return template_spec
def set_vm_affinity_rule(self, template_spec):
if self.vm_affinity:
template_spec["affinity"] = self.vm_affinity
return template_spec
def set_labels(self):
vm_labels = self.res["spec"]["template"].setdefault("metadata", {}).setdefault("labels", {})
vm_labels.update({
f"{Resource.ApiGroup.KUBEVIRT_IO}/vm": self.name,
f"{Resource.ApiGroup.KUBEVIRT_IO}/domain": self.name,
})
if self.additional_labels:
vm_labels.update(self.additional_labels)
if self.vm_debug_logs:
vm_labels["debugLogs"] = "true"
def set_run_strategy(self):
# when runStrategy is set to Halted the VM will not start on creation
# when runStrategy is set to Always the VM will start on creation
# To create a VM resource, but not begin VM cloning, use VirtualMachine.RunStrategy.MANUAL
self.res["spec"]["runStrategy"] = self.run_strategy
def set_instance_type(self):
if self.vm_instance_type:
self.res["spec"]["instancetype"] = {
"kind": self.vm_instance_type.kind,
"name": self.vm_instance_type.name,
}
if self.vm_instance_type_infer:
self.res["spec"].setdefault("instancetype", {})["inferFromVolume"] = DV_DISK
def set_vm_preference(self):
if self.vm_preference:
self.res["spec"]["preference"] = {
"kind": self.vm_preference.kind,
"name": self.vm_preference.name,
}
if self.vm_preference_infer:
self.res["spec"].setdefault("preference", {})["inferFromVolume"] = DV_DISK
def set_vm_validation_rule(self):
if self.vm_validation_rule:
add_validation_rule_to_annotation(
vm_annotation=self.res["metadata"].setdefault("annotations", {}),
vm_validation_rule=self.vm_validation_rule,
)
def _is_vm_from_template(self):
return f"{self.ApiGroup.VM_KUBEVIRT_IO}/template" in self.res["metadata"].setdefault("labels", {}).keys()
def generate_body(self):
if self.body:
if self.body.get("metadata"):
# We must set name in Template, since we use a unique name here we override it.
self.res["metadata"] = self.body["metadata"]
self.res["metadata"]["name"] = self.name
self.res["spec"] = self.body["spec"]
if self.annotations:
self.res["metadata"].setdefault("annotations", {}).update(self.annotations)
def update_vm_memory_configuration(self, template_spec):
# Faster VMI start time
if (
OS_FLAVOR_WINDOWS in self.os_flavor
and not self.memory_guest
and not self.memory_requests
and not self.vm_instance_type
and not self.vm_instance_type_infer
):
self.memory_guest = Images.Windows.DEFAULT_MEMORY_SIZE
# memory_guest (memory.guest) value is the amount of memory given to VM itself
# memory_requests (requests.memory) value is the amount of memory given to virt-launcher pod
# (this also includes virtualization infra overhead)
# although both values can be set simulteniously on the VM spec, only memory.guest should be used by user
# (which is meant to reflect VM memory amount)
if self.memory_guest and self.memory_requests:
LOGGER.warning(
"Setting both memory.guest and requests.memory values! (Users should set VM memory via memory.guest!)"
)
if bitmath.parse_string_unsafe(self.memory_guest) > bitmath.parse_string_unsafe(self.memory_requests):
LOGGER.warning(
"Setting memory.guest bigger then requests.memory! (This might cause unpredictable issues!)"
)
if self.memory_guest:
template_spec.setdefault("domain", {}).setdefault("memory", {})["guest"] = str(self.memory_guest)
if self.memory_max_guest:
template_spec.setdefault("domain", {}).setdefault("memory", {})["maxGuest"] = self.memory_max_guest
if self.memory_requests:
LOGGER.warning("Setting requests.memory value! (Users should set VM memory via memory.guest!)")
template_spec.setdefault("domain", {}).setdefault("resources", {}).setdefault("requests", {})["memory"] = (
self.memory_requests
)
if self.memory_limits:
template_spec.setdefault("domain", {}).setdefault("resources", {}).setdefault("limits", {})["memory"] = (
self.memory_limits
)
return template_spec
def update_vm_network_configuration(self, template_spec):
for iface_name in self.interfaces:
iface_type = self.interfaces_types.get(iface_name, "bridge")
network_dict = {"name": iface_name, iface_type: {}}
if self.macs:
network_dict["macAddress"] = self.macs.get(iface_name)
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("interfaces", []).append(
network_dict
)
for iface_name, network in self.networks.items():
template_spec.setdefault("networks", []).append({"name": iface_name, "multus": {"networkName": network}})
if self.network_model:
template_spec.setdefault("domain", {}).setdefault("devices", {}).setdefault("interfaces", [{}])[0][
"model"
] = self.network_model
if self.network_multiqueue is not None:
template_spec.setdefault("domain", {}).setdefault("devices", {}).update({
"networkInterfaceMultiqueue": self.network_multiqueue
})
return template_spec
def _apply_ipv6_masquerade_cloud_init(self) -> None:
"""Apply default IPv6 cloud-init network configuration for the masquerade interface.
Configures both eth0 and enp1s0 with a fixed IPv6 address and gateway to enable
SSH on IPv6 single-stack clusters. Both interface names are configured since
naming is not predictable across VMs. If networkData already exists in
cloud_init_data, the masquerade interfaces are merged without overriding
user-defined eth0 or enp1s0 values.
"""
if not self.cloud_init_data:
self.cloud_init_data = {}
primary_interface_data = {
"addresses": ["fd10:0:2::2/120"],
"gateway6": "fd10:0:2::1",
"dhcp4": False,
"dhcp6": False,
}
# Configure both interface names to ensure network configuration is applied as naming is not predictable
ipv6_interfaces = {
"eth0": {"match": {"name": "eth0"}, **primary_interface_data},
"enp1s0": {"match": {"name": "enp1s0"}, **primary_interface_data},
}
if "networkData" in self.cloud_init_data:
existing_ethernets = self.cloud_init_data["networkData"].get("ethernets", {})
merged_ethernets = {**ipv6_interfaces, **existing_ethernets}
self.cloud_init_data["networkData"]["ethernets"] = merged_ethernets
if "version" not in self.cloud_init_data["networkData"]:
self.cloud_init_data["networkData"]["version"] = 2
else:
self.cloud_init_data.update(cloud_init_network_data(data={"ethernets": ipv6_interfaces}))
    def update_vm_cloud_init_data(self, template_spec):
        """Update the VM template spec with cloud-init data.

        On IPv6 single-stack clusters, applies default IPv6 network
        configuration before merging any user-provided cloud-init data.
        If the template spec already contains cloud-init data, userData is
        appended and networkData is replaced. Otherwise the generated cloud-init
        data is set directly.

        Args:
            template_spec (dict): The VM template spec to update.

        Returns:
            dict: The updated template spec.

        Example:
            IPv6 single-stack cluster result (networkData injected automatically)::

                - cloudInitNoCloud:
                    networkData: |
                      ethernets:
                        enp1s0: &id001
                          addresses:
                          - fd10:0:2::2/120
                          dhcp4: false
                          dhcp6: false
                          gateway6: fd10:0:2::1
                        eth0: *id001
                      version: 2
                    userData: |-
                      #cloud-config
                      chpasswd:
                        expire: false
                      password: password
                      user: fedora
                  name: cloudinitdisk

            Non-IPv6-only cluster result (userData only, no networkData injected)::

                - cloudInitNoCloud:
                    userData: |-
                      #cloud-config
                      chpasswd:
                        expire: false
                      password: password
                      user: fedora
                  name: cloudinitdisk
        """
        if is_ipv6_single_stack_cluster():
            LOGGER.info(f"IPv6 single-stack cluster detected, applying default IPv6 cloud-init for VM {self.name}")
            self._apply_ipv6_masquerade_cloud_init()
        if self.cloud_init_data:
            cloud_init_volume = vm_cloud_init_volume(vm_spec=template_spec)
            cloud_init_volume_type = self.cloud_init_type or CLOUD_INIT_NO_CLOUD
            generated_cloud_init = generate_cloud_init_data(data=self.cloud_init_data)
            existing_cloud_init_data = cloud_init_volume.get(cloud_init_volume_type)
            # If spec already contains cloud init data
            if existing_cloud_init_data:
                if "userData" in generated_cloud_init:
                    # Append, dropping the duplicate "#cloud-config" header from the new chunk.
                    cloud_init_volume[cloud_init_volume_type]["userData"] += generated_cloud_init[
                        "userData"
                    ].removeprefix("#cloud-config\n")
                if "networkData" in generated_cloud_init:
                    # networkData is replaced wholesale, not merged.
                    cloud_init_volume[cloud_init_volume_type]["networkData"] = generated_cloud_init["networkData"]
            else:
                cloud_init_volume[cloud_init_volume_type] = generated_cloud_init
            template_spec = vm_cloud_init_disk(vm_spec=template_spec)
        return template_spec
def enable_ssh_in_cloud_init_data(self, template_spec):
    """Enable SSH access to the VM via its cloud-init userData.

    Adds login credentials (for non-template VMs), an authorized SSH key read
    from the CNV_VM_SSH_KEY_PATH environment variable, and run-commands that
    enable password authentication and (re)start sshd.

    Args:
        template_spec (dict): The VM template spec to update.

    Returns:
        dict: The updated template spec.
    """
    cloud_init_volume = vm_cloud_init_volume(vm_spec=template_spec)
    cloud_init_volume_type = self.cloud_init_type or CLOUD_INIT_NO_CLOUD
    template_spec = vm_cloud_init_disk(vm_spec=template_spec)
    cloud_init_volume.setdefault(cloud_init_volume_type, {}).setdefault("userData", "")
    # Saving in an intermediate string for readability
    cloud_init_user_data = cloud_init_volume[cloud_init_volume_type]["userData"]
    # Populate userData with OS-related login credentials; not needed for a VM from template.
    if not self.is_vm_from_template:
        login_generated_data = generate_cloud_init_data(
            data={
                "userData": {
                    "user": self.username,
                    "password": self.password,
                    "chpasswd": {"expire": False},
                }
            }
        )
        # 'ssh_pwauth' field is needed for Fedora38 VMs, where PasswordAuthentication in
        # /etc/ssh/sshd_config.d/50-cloud-init.conf is set to 'no', but to allow ssh connection it should be 'yes'.
        if self.os_flavor == OS_FLAVOR_FEDORA:
            login_generated_data["userData"] += "ssh_pwauth: true\n"
        # Newline needed in case userData is not empty
        cloud_init_user_data_newline = "\n" if cloud_init_user_data else ""
        cloud_init_user_data += f"{cloud_init_user_data_newline}{login_generated_data['userData']}"
    # Add RSA to authorized_keys to enable login using an SSH key
    authorized_key = utilities.data_utils.authorized_key(private_key_path=os.environ[CNV_VM_SSH_KEY_PATH])
    cloud_init_user_data += f"\nssh_authorized_keys:\n [{authorized_key}]"
    # Enable LEGACY crypto policies - needed until keys updated to ECDSA
    # Enable PasswordAuthentication in /etc/ssh/sshd_config
    # Enable SSH service and restart SSH service
    run_cmd_commands = [
        (
            # TODO: Remove LEGACY ssh-rsa support after ECDSA supported by test
            "grep ssh-rsa /etc/crypto-policies/back-ends/opensshserver.config || "
            "sudo update-crypto-policies --set LEGACY || true"
        ),
        (r"sudo sed -i 's/^#\?PasswordAuthentication no/PasswordAuthentication yes/g' " "/etc/ssh/sshd_config"),
        "sudo systemctl enable sshd",
        "sudo systemctl restart sshd",
    ]
    run_ssh_generated_data = generate_cloud_init_data(data={"runcmd": run_cmd_commands})
    # If runcmd already exists in userData, add run_cmd_commands before any other command.
    # str.replace with count=1 is used instead of re.sub: the generated commands contain
    # backslashes (e.g. the sed '\?' pattern) that re.sub would interpret as regex
    # replacement escapes, and only the first "runcmd:" occurrence must be expanded.
    runcmd_prefix = "runcmd:"
    if runcmd_prefix in cloud_init_user_data:
        cloud_init_user_data = cloud_init_user_data.replace(
            runcmd_prefix,
            f"{runcmd_prefix}\n{run_ssh_generated_data['runcmd']}",
            1,
        )
    else:
        cloud_init_user_data += f"\nruncmd: {run_cmd_commands}"
    cloud_init_volume[cloud_init_volume_type]["userData"] = cloud_init_user_data
    return template_spec
def update_vm_cpu_configuration(self, template_spec):
    """Apply the VM object's CPU settings onto the template spec.

    Each setting is written only when it is set on the VM, so spec sections
    ("domain", "cpu", "resources") are created lazily and the spec is left
    untouched when no CPU configuration is present.

    Args:
        template_spec (dict): The VM template spec to update.

    Returns:
        dict: The updated template spec.
    """

    def domain():
        # Lazily create spec["domain"] only when a setting is actually applied.
        return template_spec.setdefault("domain", {})

    def cpu():
        return domain().setdefault("cpu", {})

    if self.cpu_flags:
        domain()["cpu"] = self.cpu_flags
    if self.cpu_limits:
        domain().setdefault("resources", {}).setdefault("limits", {}).update({"cpu": self.cpu_limits})
    if self.cpu_requests:
        domain().setdefault("resources", {}).setdefault("requests", {}).update({"cpu": self.cpu_requests})
    if self.cpu_cores:
        cpu()["cores"] = self.cpu_cores
    # Faster VMI start time: give Windows VMs the default thread count unless
    # threads were requested explicitly or an instance type is in use.
    if (
        OS_FLAVOR_WINDOWS in self.os_flavor
        and not self.cpu_threads
        and not self.vm_instance_type
        and not self.vm_instance_type_infer
    ):
        self.cpu_threads = Images.Windows.DEFAULT_CPU_THREADS
    if self.cpu_threads:
        cpu()["threads"] = self.cpu_threads
    if self.cpu_sockets:
        cpu()["sockets"] = self.cpu_sockets
    if self.cpu_placement:
        cpu()["dedicatedCpuPlacement"] = True
    if self.isolate_emulator_thread:
        # Only valid in combination with cpu_placement=True (dedicatedCpuPlacement).
        cpu()["isolateEmulatorThread"] = True
    if self.cpu_model:
        cpu()["model"] = self.cpu_model
    if self.cpu_max_sockets:
        cpu()["maxSockets"] = self.cpu_max_sockets
    return template_spec