Skip to content

Commit bfb1d4a

Browse files
committed
Addressed Review comments
1. Removed per-architecture test parameter definitions and moved them to where they are set (e.g. cpu_threads & machine_type). 2. Removed architecture-based skips from test code; added the s390x marker to the tests instead. Signed-off-by: chandramerla <Chandra.Merla@ibm.com>
1 parent d50066e commit bfb1d4a

12 files changed

Lines changed: 80 additions & 74 deletions

File tree

tests/conftest.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070
from timeout_sampler import TimeoutSampler
7171

7272
import utilities.hco
73-
from tests.utils import download_and_extract_tar, update_cluster_cpu_model
73+
from utilities.architecture import is_s390x, is_x86_64
7474
from utilities.bitwarden import get_cnv_tests_secret_by_name
7575
from utilities.constants import (
7676
AAQ_NAMESPACE_LABEL,
@@ -107,7 +107,6 @@
107107
RHEL9_STR,
108108
RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
109109
RHSM_SECRET_NAME,
110-
S390X,
111110
SSP_CR_COMMON_TEMPLATES_LIST_KEY_NAME,
112111
TIMEOUT_3MIN,
113112
TIMEOUT_4MIN,
@@ -215,6 +214,9 @@
215214
wait_for_windows_vm,
216215
)
217216

217+
from .utils import download_and_extract_tar, update_cluster_cpu_model
218+
from .virt.constants import MachineTypesNames
219+
218220
LOGGER = logging.getLogger(__name__)
219221
HTTP_SECRET_NAME = "htpass-secret-for-cnv-tests"
220222
HTPASSWD_PROVIDER_DICT = {
@@ -2770,12 +2772,22 @@ def cluster_modern_cpu_model_scope_class(
27702772
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
27712773

27722774

2775+
@pytest.fixture(scope="function")
def non_default_machine_type():
    """Return an architecture-appropriate non-default machine type.

    Skips the requesting test when the cluster architecture is neither
    x86_64 nor s390x.
    """
    if is_x86_64():
        return MachineTypesNames.pc_q35_rhel7_6
    if is_s390x():
        return MachineTypesNames.s390_ccw_virtio
    pytest.skip("Unsupported architecture for this test")
2783+
2784+
27732785
@pytest.fixture(scope="module")
27742786
def machine_type_from_kubevirt_config(kubevirt_config_scope_module, nodes_cpu_architecture):
27752787
"""Extract machine type default from kubevirt CR."""
27762788
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
27772789
# hardcoded to s390_ccw_virtio in kubevirt code.
2778-
if nodes_cpu_architecture == S390X:
2790+
if is_s390x():
27792791
mc_type = "s390-ccw-virtio"
27802792
else:
27812793
mc_type = kubevirt_config_scope_module["architectureConfiguration"][nodes_cpu_architecture]["machineType"]

tests/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ def update_cluster_cpu_model(admin_client, hco_namespace, hco_resource, cpu_mode
284284
hco_namespace=hco_namespace,
285285
path=["cpuModel"],
286286
value=cpu_model,
287-
timeout=120,
287+
timeout=30,
288288
)
289289
yield
290290

tests/virt/node/general/test_machinetype.py

Lines changed: 26 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,23 @@ def vm(request, cluster_cpu_model_scope_function, unprivileged_client, namespace
3434
yield vm
3535

3636

37+
@pytest.fixture()
def vm_with_non_default_machine_type(
    request, cluster_cpu_model_scope_function, unprivileged_client, namespace, non_default_machine_type
):
    """Yield a running Fedora VM created with the non-default machine type for this architecture.

    SSH connectivity is not checked; the tests only inspect the VM/VMI spec and domain XML.
    """
    vm_name = f"vm-{request.param['vm_name']}-machine-type"

    vm_kwargs = {
        "name": vm_name,
        "namespace": namespace.name,
        "body": fedora_vm_body(name=vm_name),
        "client": unprivileged_client,
        "machine_type": non_default_machine_type,
    }
    with VirtualMachineForTests(**vm_kwargs) as machine_type_vm:
        running_vm(vm=machine_type_vm, check_ssh_connectivity=False)
        yield machine_type_vm
52+
53+
3754
@pytest.fixture()
3855
def updated_kubevirt_config_machine_type(
3956
request,
@@ -84,39 +101,22 @@ def migrated_vm(vm, machine_type_from_kubevirt_config):
84101
indirect=True,
85102
)
86103
def test_default_machine_type(machine_type_from_kubevirt_config, vm):
87-
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
88-
# hardcoded to s390_ccw_virtio in kubevirt code.
89-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
90-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
91-
else:
92-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
93-
validate_machine_type(
94-
vm=vm,
95-
expected_machine_type=machine_type_from_kubevirt_config,
96-
expected_libvirt_machine_type=expected_libvirt_machine_type,
97-
)
104+
validate_machine_type(vm=vm, expected_machine_type=machine_type_from_kubevirt_config)
98105

99106

100107
@pytest.mark.parametrize(
101-
"vm, expected, expected_libvirt",
108+
"vm_with_non_default_machine_type",
102109
[
103110
pytest.param(
104-
{"vm_name": "pc-q35", "machine_type": MachineTypesNames.pc_q35_rhel7_6},
105-
MachineTypesNames.pc_q35_rhel7_6,
106-
MachineTypesNames.pc_q35_rhel7_6,
107-
marks=[pytest.mark.polarion("CNV-3311"), pytest.mark.x86_64()],
108-
),
109-
pytest.param(
110-
{"vm_name": "s390-ccw-virtio", "machine_type": MachineTypesNames.s390_ccw_virtio},
111-
MachineTypesNames.s390_ccw_virtio,
112-
MachineTypesNames.s390_ccw_virtio_rhel9_6,
113-
marks=[pytest.mark.s390x()],
114-
),
111+
{"vm_name": "custom-machine-type"},
112+
marks=[pytest.mark.polarion("CNV-3311")],
113+
)
115114
],
116-
indirect=["vm"],
115+
indirect=["vm_with_non_default_machine_type"],
117116
)
118-
def test_vm_machine_type(vm, expected, expected_libvirt):
119-
validate_machine_type(vm=vm, expected_machine_type=expected, expected_libvirt_machine_type=expected_libvirt)
117+
@pytest.mark.s390x
118+
def test_vm_machine_type(vm_with_non_default_machine_type, non_default_machine_type):
119+
validate_machine_type(vm=vm_with_non_default_machine_type, expected_machine_type=non_default_machine_type)
120120

121121

122122
@pytest.mark.parametrize(
@@ -138,15 +138,9 @@ def test_vm_machine_type(vm, expected, expected_libvirt):
138138
def test_migrate_vm(machine_type_from_kubevirt_config, vm):
139139
"""Migrate VM and check machine type is same"""
140140
migrate_vm_and_verify(vm=vm)
141-
# s390x: machine type missing in config (GH#14953). Same as above.
142-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
143-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
144-
else:
145-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
146141
validate_machine_type(
147142
vm=vm,
148143
expected_machine_type=machine_type_from_kubevirt_config,
149-
expected_libvirt_machine_type=expected_libvirt_machine_type,
150144
)
151145

152146

tests/virt/node/migration_and_maintenance/test_post_copy_migration.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
9797
"additional_labels": VM_LABEL,
9898
},
9999
id="RHEL-VM",
100-
marks=[pytest.mark.x86_64, pytest.mark.s390x],
100+
marks=[pytest.mark.x86_64],
101101
),
102102
pytest.param(
103103
{
@@ -120,12 +120,14 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
120120
class TestPostCopyMigration:
121121
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::migrate_vm")
122122
@pytest.mark.polarion("CNV-11421")
123+
@pytest.mark.s390x
123124
def test_migrate_vm(self, hotplugged_vm, vm_background_process_id, migrated_hotplugged_vm):
124125
assert_migration_post_copy_mode(vm=hotplugged_vm)
125126
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
126127

127128
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::node_drain", depends=[f"{TESTS_CLASS_NAME}::migrate_vm"])
128129
@pytest.mark.polarion("CNV-11422")
130+
@pytest.mark.s390x
129131
def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_with_hotplugged_vm):
130132
assert_migration_post_copy_mode(vm=hotplugged_vm)
131133
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -135,6 +137,7 @@ def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_
135137
)
136138
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::hotplug_cpu", depends=[f"{TESTS_CLASS_NAME}::node_drain"])
137139
@pytest.mark.polarion("CNV-11423")
140+
@pytest.mark.s390x
138141
def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
139142
assert_guest_os_cpu_count(vm=hotplugged_vm, spec_cpu_amount=SIX_CPU_SOCKETS)
140143
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -144,10 +147,6 @@ def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_ba
144147
)
145148
@pytest.mark.dependency(depends=[f"{TESTS_CLASS_NAME}::hotplug_cpu"])
146149
@pytest.mark.polarion("CNV-11424")
147-
def test_hotplug_memory(self, request, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
148-
# Dynamically skip only this test when class is run with s390x params as memory hotplug is not supported
149-
if request.node.get_closest_marker("s390x"):
150-
pytest.skip("Skipping test_hotplug_memory for s390x")
151-
150+
def test_hotplug_memory(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
152151
assert_guest_os_memory_amount(vm=hotplugged_vm, spec_memory_amount=SIX_GI_MEMORY)
153152
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)

tests/virt/node/migration_and_maintenance/test_vm_disk_load_with_migration.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -72,20 +72,6 @@ def get_disk_usage(ssh_exec):
7272
@pytest.mark.parametrize(
7373
"data_volume_scope_function, vm_with_fio",
7474
[
75-
pytest.param(
76-
{
77-
"dv_name": FEDORA_LATEST_OS,
78-
"image": FEDORA_LATEST.get("image_path"),
79-
"storage_class": py_config["default_storage_class"],
80-
"dv_size": FEDORA_LATEST.get("dv_size"),
81-
},
82-
{
83-
"vm_name": "fedora-load-vm",
84-
"template_labels": FEDORA_LATEST_LABELS,
85-
"cpu_threads": 2,
86-
},
87-
marks=[pytest.mark.polarion("CNV-4663"), pytest.mark.x86_64()],
88-
),
8975
pytest.param(
9076
{
9177
"dv_name": FEDORA_LATEST_OS,
@@ -98,11 +84,12 @@ def get_disk_usage(ssh_exec):
9884
"template_labels": FEDORA_LATEST_LABELS,
9985
"cpu_threads": 1,
10086
},
101-
marks=[pytest.mark.s390x()],
87+
marks=[pytest.mark.polarion("CNV-4663")],
10288
),
10389
],
10490
indirect=True,
10591
)
92+
@pytest.mark.s390x
10693
@pytest.mark.rwx_default_storage
10794
def test_fedora_vm_load_migration(vm_with_fio, running_fio_in_vm):
10895
LOGGER.info("Test migrate VM with disk load")

tests/virt/node/migration_and_maintenance/test_vm_memory_load_with_migration.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,17 +58,16 @@ class TestMigrationVMWithMemoryLoad:
5858
"vm_name": "fedora-vm-with-memory-load",
5959
"template_labels": FEDORA_LATEST_LABELS,
6060
"memory_guest": "4Gi",
61-
"cpu_cores": 2,
61+
"cpu_cores": 1,
6262
},
6363
marks=[
64-
pytest.mark.polarion("CNV-4661"),
65-
pytest.mark.x86_64(),
66-
pytest.mark.s390x(),
64+
pytest.mark.polarion("CNV-4661")
6765
],
6866
),
6967
],
7068
indirect=True,
7169
)
70+
@pytest.mark.s390x
7271
def test_fedora_vm_migrate_with_memory_load(
7372
self,
7473
vm_with_memory_load,

tests/virt/node/node_labeller/cpu_features/test_node_feature_discovery.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,6 @@ def updated_kubevirt_cpus(
5656
hco_namespace=hco_namespace,
5757
path=[OBSOLETE_CPU, cluster_common_node_cpu],
5858
value=True,
59-
timeout=120,
6059
)
6160
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
6261
yield

tests/virt/node/node_labeller/cpu_features/test_vm_with_cpu_flag.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@ def test_vm_with_cpu_flag_negative(cpu_flag_vm_negative):
6161
cpu_flag_vm_negative.vmi.wait_until_running(timeout=TIMEOUT_1MIN)
6262

6363

64-
@pytest.mark.gating
6564
@pytest.mark.x86_64
6665
@pytest.mark.s390x
6766
@pytest.mark.polarion("CNV-1269")

tests/virt/utils.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
from pytest_testconfig import config as py_config
1818
from timeout_sampler import TimeoutExpiredError, TimeoutSampler
1919

20+
from tests.virt.constants import MachineTypesNames
2021
from tests.virt.node.gpu.constants import (
2122
GPU_PRETTY_NAME_STR,
2223
MDEV_NAME_STR,
@@ -305,12 +306,16 @@ def wait_for_virt_launcher_pod(vmi):
305306
raise
306307

307308

308-
def validate_machine_type(
309-
vm, expected_machine_type, expected_libvirt_machine_type=None
310-
): # Incase of s390x machine_type in VM/VMI are different than one in libvirt xml
309+
def validate_machine_type(vm, expected_machine_type):
311310
vm_machine_type = vm.instance.spec.template.spec.domain.machine.type
312311
vmi_machine_type = vm.vmi.instance.spec.domain.machine.type
313-
if expected_libvirt_machine_type is None:
312+
313+
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
314+
# hardcoded to s390_ccw_virtio in kubevirt code. So incase of s390x machine_type in VM/VMI are different
315+
# than one in libvirt xml
316+
if expected_machine_type == MachineTypesNames.s390_ccw_virtio:
317+
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
318+
else:
314319
expected_libvirt_machine_type = expected_machine_type
315320

316321
assert vm_machine_type == vmi_machine_type == expected_machine_type, (

utilities/architecture.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,3 +31,13 @@ def get_cluster_architecture() -> str:
3131
raise ValueError(f"{arch} architecture in not supported")
3232

3333
return arch
34+
35+
36+
def is_s390x() -> bool:
    """Return True when the cluster architecture is s390x."""
    # NOTE(review): local import presumably avoids a circular dependency
    # between utilities.constants and this module — TODO confirm.
    from utilities.constants import S390X

    current_arch = get_cluster_architecture()
    return current_arch == S390X
39+
40+
41+
def is_x86_64() -> bool:
    """Return True when the cluster architecture is x86_64."""
    # NOTE(review): local import presumably avoids a circular dependency
    # between utilities.constants and this module — TODO confirm.
    from utilities.constants import X86_64

    current_arch = get_cluster_architecture()
    return current_arch == X86_64

0 commit comments

Comments
 (0)