Skip to content

Commit 574d712

Browse files
committed
Addressed review comments
1. Removed per-architecture test-parameter definitions and moved them to the place where they are set (e.g. cpu_threads & machine_type). 2. Removed the in-code skip based on architecture; instead added the s390x marker to the test. Signed-off-by: chandramerla <Chandra.Merla@ibm.com>
1 parent b525d2b commit 574d712

15 files changed

Lines changed: 82 additions & 106 deletions

tests/conftest.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,6 @@
7070
from timeout_sampler import TimeoutSampler
7171

7272
import utilities.hco
73-
from tests.utils import download_and_extract_tar, update_cluster_cpu_model
7473
from utilities.bitwarden import get_cnv_tests_secret_by_name
7574
from utilities.constants import (
7675
AAQ_NAMESPACE_LABEL,
@@ -107,7 +106,6 @@
107106
RHEL9_STR,
108107
RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
109108
RHSM_SECRET_NAME,
110-
S390X,
111109
SSP_CR_COMMON_TEMPLATES_LIST_KEY_NAME,
112110
TIMEOUT_3MIN,
113111
TIMEOUT_4MIN,
@@ -215,6 +213,9 @@
215213
wait_for_windows_vm,
216214
)
217215

216+
from .utils import download_and_extract_tar, update_cluster_cpu_model
217+
from .virt.constants import MachineTypesNames
218+
218219
LOGGER = logging.getLogger(__name__)
219220
HTTP_SECRET_NAME = "htpass-secret-for-cnv-tests"
220221
HTPASSWD_PROVIDER_DICT = {
@@ -2770,12 +2771,17 @@ def cluster_modern_cpu_model_scope_class(
27702771
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
27712772

27722773

2774+
@pytest.fixture(scope="function")
2775+
def non_default_machine_type(is_s390x_cluster):
2776+
return MachineTypesNames.s390_ccw_virtio if is_s390x_cluster else MachineTypesNames.pc_q35_rhel7_6
2777+
2778+
27732779
@pytest.fixture(scope="module")
2774-
def machine_type_from_kubevirt_config(kubevirt_config_scope_module, nodes_cpu_architecture):
2780+
def machine_type_from_kubevirt_config(is_s390x_cluster, kubevirt_config_scope_module, nodes_cpu_architecture):
27752781
"""Extract machine type default from kubevirt CR."""
27762782
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
27772783
# hardcoded to s390_ccw_virtio in kubevirt code.
2778-
if nodes_cpu_architecture == S390X:
2784+
if is_s390x_cluster:
27792785
mc_type = "s390-ccw-virtio"
27802786
else:
27812787
mc_type = kubevirt_config_scope_module["architectureConfiguration"][nodes_cpu_architecture]["machineType"]

tests/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ def update_cluster_cpu_model(admin_client, hco_namespace, hco_resource, cpu_mode
284284
hco_namespace=hco_namespace,
285285
path=["cpuModel"],
286286
value=cpu_model,
287-
timeout=120,
287+
timeout=30,
288288
)
289289
yield
290290

tests/virt/node/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ def hotplugged_vm(
6666
client=unprivileged_client,
6767
data_source=golden_image_data_source_scope_class,
6868
cpu_max_sockets=EIGHT_CPU_SOCKETS,
69-
# s390x doesn't support maxGuest as it doesn't support hotplug
69+
# s390x doesn't support maxGuest as it doesn't support hotplug memory
7070
memory_max_guest=TEN_GI_MEMORY if nodes_cpu_architecture != S390X else None,
7171
cpu_sockets=FOUR_CPU_SOCKETS,
7272
cpu_threads=ONE_CPU_THREAD,

tests/virt/node/cpu_sockets_threads/test_cpu_support_sockets_threads.py

Lines changed: 8 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ def check_vm_dumpxml(vm, cores=None, sockets=None, threads=None):
2121

2222

2323
@pytest.fixture()
24-
def vm_with_cpu_support(request, namespace, unprivileged_client):
24+
def vm_with_cpu_support(request, is_s390x_cluster, namespace, unprivileged_client):
2525
"""
2626
VM with CPU support (cores,sockets,threads)
2727
"""
@@ -31,7 +31,7 @@ def vm_with_cpu_support(request, namespace, unprivileged_client):
3131
namespace=namespace.name,
3232
cpu_cores=request.param["cores"],
3333
cpu_sockets=request.param["sockets"],
34-
cpu_threads=request.param["threads"],
34+
cpu_threads=1 if is_s390x_cluster else request.param["threads"],
3535
cpu_max_sockets=request.param["sockets"] or 1,
3636
body=fedora_vm_body(name=name),
3737
client=unprivileged_client,
@@ -45,37 +45,29 @@ def vm_with_cpu_support(request, namespace, unprivileged_client):
4545
[
4646
pytest.param(
4747
{"sockets": 2, "cores": 2, "threads": 2},
48-
marks=(pytest.mark.polarion("CNV-2820"), pytest.mark.gating, pytest.mark.conformance, pytest.mark.x86_64()),
48+
marks=(pytest.mark.polarion("CNV-2820"), pytest.mark.gating, pytest.mark.conformance),
4949
id="case1: 2 cores, 2 threads, 2 sockets",
5050
),
5151
pytest.param(
5252
{"sockets": None, "cores": 1, "threads": 2},
53-
marks=(pytest.mark.polarion("CNV-2823"), pytest.mark.x86_64()),
53+
marks=(pytest.mark.polarion("CNV-2823")),
5454
id="case2: 1 cores, 2 threads, no sockets",
5555
),
56-
pytest.param(
57-
{"sockets": 2, "cores": 2, "threads": 1},
58-
marks=(pytest.mark.gating(), pytest.mark.s390x()),
59-
id="case1: 2 cores, 1 threads, 2 sockets",
60-
),
61-
pytest.param(
62-
{"sockets": None, "cores": 1, "threads": 1},
63-
marks=[pytest.mark.s390x()],
64-
id="case2: 1 cores, 1 threads, no sockets",
65-
),
6656
pytest.param(
6757
{"sockets": 2, "cores": 1, "threads": None},
68-
marks=(pytest.mark.polarion("CNV-2822"), pytest.mark.x86_64(), pytest.mark.s390x()),
58+
marks=(pytest.mark.polarion("CNV-2822")),
6959
id="case3: 1 cores, no threads, 2 sockets",
7060
),
7161
pytest.param(
7262
{"sockets": None, "cores": 2, "threads": None},
73-
marks=(pytest.mark.polarion("CNV-2821"), pytest.mark.x86_64(), pytest.mark.s390x()),
63+
marks=(pytest.mark.polarion("CNV-2821")),
7464
id="case4: 2 cores, no threads, no sockets",
7565
),
7666
],
7767
indirect=True,
7868
)
69+
@pytest.mark.s390x
70+
@pytest.mark.x86_64
7971
def test_vm_with_cpu_support(vm_with_cpu_support):
8072
"""
8173
Test VM with cpu support

tests/virt/node/general/test_machinetype.py

Lines changed: 20 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,21 @@ def vm(request, cluster_cpu_model_scope_function, unprivileged_client, namespace
3434
yield vm
3535

3636

37+
@pytest.fixture()
38+
def vm_with_non_default_machine_type(request, unprivileged_client, namespace, non_default_machine_type):
39+
name = "vm-custom-machine-type"
40+
41+
with VirtualMachineForTests(
42+
name=name,
43+
namespace=namespace.name,
44+
body=fedora_vm_body(name=name),
45+
client=unprivileged_client,
46+
machine_type=non_default_machine_type,
47+
) as vm:
48+
running_vm(vm=vm, check_ssh_connectivity=False)
49+
yield vm
50+
51+
3752
@pytest.fixture()
3853
def updated_kubevirt_config_machine_type(
3954
request,
@@ -84,39 +99,13 @@ def migrated_vm(vm, machine_type_from_kubevirt_config):
8499
indirect=True,
85100
)
86101
def test_default_machine_type(machine_type_from_kubevirt_config, vm):
87-
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
88-
# hardcoded to s390_ccw_virtio in kubevirt code.
89-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
90-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
91-
else:
92-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
93-
validate_machine_type(
94-
vm=vm,
95-
expected_machine_type=machine_type_from_kubevirt_config,
96-
expected_libvirt_machine_type=expected_libvirt_machine_type,
97-
)
102+
validate_machine_type(vm=vm, expected_machine_type=machine_type_from_kubevirt_config)
98103

99104

100-
@pytest.mark.parametrize(
101-
"vm, expected, expected_libvirt",
102-
[
103-
pytest.param(
104-
{"vm_name": "pc-q35", "machine_type": MachineTypesNames.pc_q35_rhel7_6},
105-
MachineTypesNames.pc_q35_rhel7_6,
106-
MachineTypesNames.pc_q35_rhel7_6,
107-
marks=[pytest.mark.polarion("CNV-3311"), pytest.mark.x86_64()],
108-
),
109-
pytest.param(
110-
{"vm_name": "s390-ccw-virtio", "machine_type": MachineTypesNames.s390_ccw_virtio},
111-
MachineTypesNames.s390_ccw_virtio,
112-
MachineTypesNames.s390_ccw_virtio_rhel9_6,
113-
marks=[pytest.mark.s390x()],
114-
),
115-
],
116-
indirect=["vm"],
117-
)
118-
def test_vm_machine_type(vm, expected, expected_libvirt):
119-
validate_machine_type(vm=vm, expected_machine_type=expected, expected_libvirt_machine_type=expected_libvirt)
105+
@pytest.mark.polarion("CNV-3311")
106+
@pytest.mark.s390x
107+
def test_vm_machine_type(non_default_machine_type, vm_with_non_default_machine_type):
108+
validate_machine_type(vm=vm_with_non_default_machine_type, expected_machine_type=non_default_machine_type)
120109

121110

122111
@pytest.mark.parametrize(
@@ -138,15 +127,9 @@ def test_vm_machine_type(vm, expected, expected_libvirt):
138127
def test_migrate_vm(machine_type_from_kubevirt_config, vm):
139128
"""Migrate VM and check machine type is same"""
140129
migrate_vm_and_verify(vm=vm)
141-
# s390x: machine type missing in config (GH#14953). Same as above.
142-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
143-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
144-
else:
145-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
146130
validate_machine_type(
147131
vm=vm,
148132
expected_machine_type=machine_type_from_kubevirt_config,
149-
expected_libvirt_machine_type=expected_libvirt_machine_type,
150133
)
151134

152135

tests/virt/node/migration_and_maintenance/test_post_copy_migration.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
9797
"additional_labels": VM_LABEL,
9898
},
9999
id="RHEL-VM",
100-
marks=[pytest.mark.x86_64, pytest.mark.s390x],
100+
marks=[pytest.mark.x86_64],
101101
),
102102
pytest.param(
103103
{
@@ -120,12 +120,14 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
120120
class TestPostCopyMigration:
121121
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::migrate_vm")
122122
@pytest.mark.polarion("CNV-11421")
123+
@pytest.mark.s390x
123124
def test_migrate_vm(self, hotplugged_vm, vm_background_process_id, migrated_hotplugged_vm):
124125
assert_migration_post_copy_mode(vm=hotplugged_vm)
125126
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
126127

127128
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::node_drain", depends=[f"{TESTS_CLASS_NAME}::migrate_vm"])
128129
@pytest.mark.polarion("CNV-11422")
130+
@pytest.mark.s390x
129131
def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_with_hotplugged_vm):
130132
assert_migration_post_copy_mode(vm=hotplugged_vm)
131133
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -135,6 +137,7 @@ def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_
135137
)
136138
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::hotplug_cpu", depends=[f"{TESTS_CLASS_NAME}::node_drain"])
137139
@pytest.mark.polarion("CNV-11423")
140+
@pytest.mark.s390x
138141
def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
139142
assert_guest_os_cpu_count(vm=hotplugged_vm, spec_cpu_amount=SIX_CPU_SOCKETS)
140143
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -144,10 +147,6 @@ def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_ba
144147
)
145148
@pytest.mark.dependency(depends=[f"{TESTS_CLASS_NAME}::hotplug_cpu"])
146149
@pytest.mark.polarion("CNV-11424")
147-
def test_hotplug_memory(self, request, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
148-
# Dynamically skip only this test when class is run with s390x params as memory hotplug is not supported
149-
if request.node.get_closest_marker("s390x"):
150-
pytest.skip("Skipping test_hotplug_memory for s390x")
151-
150+
def test_hotplug_memory(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
152151
assert_guest_os_memory_amount(vm=hotplugged_vm, spec_memory_amount=SIX_GI_MEMORY)
153152
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)

tests/virt/node/migration_and_maintenance/test_vm_disk_load_with_migration.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -72,20 +72,6 @@ def get_disk_usage(ssh_exec):
7272
@pytest.mark.parametrize(
7373
"data_volume_scope_function, vm_with_fio",
7474
[
75-
pytest.param(
76-
{
77-
"dv_name": FEDORA_LATEST_OS,
78-
"image": FEDORA_LATEST.get("image_path"),
79-
"storage_class": py_config["default_storage_class"],
80-
"dv_size": FEDORA_LATEST.get("dv_size"),
81-
},
82-
{
83-
"vm_name": "fedora-load-vm",
84-
"template_labels": FEDORA_LATEST_LABELS,
85-
"cpu_threads": 2,
86-
},
87-
marks=[pytest.mark.polarion("CNV-4663"), pytest.mark.x86_64()],
88-
),
8975
pytest.param(
9076
{
9177
"dv_name": FEDORA_LATEST_OS,
@@ -98,11 +84,12 @@ def get_disk_usage(ssh_exec):
9884
"template_labels": FEDORA_LATEST_LABELS,
9985
"cpu_threads": 1,
10086
},
101-
marks=[pytest.mark.s390x()],
87+
marks=[pytest.mark.polarion("CNV-4663")],
10288
),
10389
],
10490
indirect=True,
10591
)
92+
@pytest.mark.s390x
10693
@pytest.mark.rwx_default_storage
10794
def test_fedora_vm_load_migration(vm_with_fio, running_fio_in_vm):
10895
LOGGER.info("Test migrate VM with disk load")

tests/virt/node/migration_and_maintenance/test_vm_memory_load_with_migration.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,17 +58,14 @@ class TestMigrationVMWithMemoryLoad:
5858
"vm_name": "fedora-vm-with-memory-load",
5959
"template_labels": FEDORA_LATEST_LABELS,
6060
"memory_guest": "4Gi",
61-
"cpu_cores": 2,
61+
"cpu_cores": 1,
6262
},
63-
marks=[
64-
pytest.mark.polarion("CNV-4661"),
65-
pytest.mark.x86_64(),
66-
pytest.mark.s390x(),
67-
],
63+
marks=pytest.mark.polarion("CNV-4661"),
6864
),
6965
],
7066
indirect=True,
7167
)
68+
@pytest.mark.s390x
7269
def test_fedora_vm_migrate_with_memory_load(
7370
self,
7471
vm_with_memory_load,

tests/virt/node/node_labeller/cpu_features/test_node_feature_discovery.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,6 @@ def updated_kubevirt_cpus(
5656
hco_namespace=hco_namespace,
5757
path=[OBSOLETE_CPU, cluster_common_node_cpu],
5858
value=True,
59-
timeout=120,
6059
)
6160
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
6261
yield
@@ -75,8 +74,8 @@ def node_label_checker(node_label_dict, label_list, dict_key):
7574

7675

7776
@pytest.mark.x86_64
78-
@pytest.mark.s390x
7977
@pytest.mark.polarion("CNV-2797")
78+
@pytest.mark.s390x
8079
def test_obsolete_cpus_in_node_labels(nodes_labels_dict, kubevirt_config):
8180
"""
8281
Test obsolete CPUs. Obsolete CPUs don't appear in node labels.

tests/virt/node/node_labeller/cpu_features/test_vm_with_cpu_flag.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@ def test_vm_with_cpu_flag_negative(cpu_flag_vm_negative):
6161
cpu_flag_vm_negative.vmi.wait_until_running(timeout=TIMEOUT_1MIN)
6262

6363

64-
@pytest.mark.gating
6564
@pytest.mark.x86_64
6665
@pytest.mark.s390x
6766
@pytest.mark.polarion("CNV-1269")

0 commit comments

Comments
 (0)