Skip to content

Commit 8b06b07

Browse files
committed
Addressed review comments
1. Removed per-arch test parameter definitions and moved them to the place where they are set (e.g. cpu_threads & machine_type). 2. Removed the arch-based skip in the code; added the s390x marker to the test instead. Signed-off-by: chandramerla <Chandra.Merla@ibm.com>
1 parent cb1cd69 commit 8b06b07

15 files changed

Lines changed: 82 additions & 104 deletions

tests/conftest.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@
7070
from timeout_sampler import TimeoutSampler
7171

7272
import utilities.hco
73-
from tests.utils import download_and_extract_tar, update_cluster_cpu_model
73+
from utilities.architecture import is_s390x_cluster
7474
from utilities.bitwarden import get_cnv_tests_secret_by_name
7575
from utilities.constants import (
7676
AAQ_NAMESPACE_LABEL,
@@ -107,7 +107,6 @@
107107
RHEL9_STR,
108108
RHEL_WITH_INSTANCETYPE_AND_PREFERENCE,
109109
RHSM_SECRET_NAME,
110-
S390X,
111110
SSP_CR_COMMON_TEMPLATES_LIST_KEY_NAME,
112111
TIMEOUT_3MIN,
113112
TIMEOUT_4MIN,
@@ -215,6 +214,9 @@
215214
wait_for_windows_vm,
216215
)
217216

217+
from .utils import download_and_extract_tar, update_cluster_cpu_model
218+
from .virt.constants import MachineTypesNames
219+
218220
LOGGER = logging.getLogger(__name__)
219221
HTTP_SECRET_NAME = "htpass-secret-for-cnv-tests"
220222
HTPASSWD_PROVIDER_DICT = {
@@ -2770,12 +2772,17 @@ def cluster_modern_cpu_model_scope_class(
27702772
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
27712773

27722774

2775+
@pytest.fixture(scope="function")
2776+
def non_default_machine_type(is_s390x_cluster):
2777+
return MachineTypesNames.s390_ccw_virtio if is_s390x_cluster else MachineTypesNames.pc_q35_rhel7_6
2778+
2779+
27732780
@pytest.fixture(scope="module")
27742781
def machine_type_from_kubevirt_config(kubevirt_config_scope_module, nodes_cpu_architecture):
27752782
"""Extract machine type default from kubevirt CR."""
27762783
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
27772784
# hardcoded to s390_ccw_virtio in kubevirt code.
2778-
if nodes_cpu_architecture == S390X:
2785+
if is_s390x_cluster():
27792786
mc_type = "s390-ccw-virtio"
27802787
else:
27812788
mc_type = kubevirt_config_scope_module["architectureConfiguration"][nodes_cpu_architecture]["machineType"]

tests/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ def update_cluster_cpu_model(admin_client, hco_namespace, hco_resource, cpu_mode
284284
hco_namespace=hco_namespace,
285285
path=["cpuModel"],
286286
value=cpu_model,
287-
timeout=120,
287+
timeout=30,
288288
)
289289
yield
290290

tests/virt/node/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ def hotplugged_vm(
6666
client=unprivileged_client,
6767
data_source=golden_image_data_source_scope_class,
6868
cpu_max_sockets=EIGHT_CPU_SOCKETS,
69-
# s390x doesn't support maxGuest as it doesn't support hotplug
69+
# s390x doesn't support maxGuest as it doesn't support hotplug memory
7070
memory_max_guest=TEN_GI_MEMORY if nodes_cpu_architecture != S390X else None,
7171
cpu_sockets=FOUR_CPU_SOCKETS,
7272
cpu_threads=ONE_CPU_THREAD,

tests/virt/node/cpu_sockets_threads/test_cpu_support_sockets_threads.py

Lines changed: 8 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import pytest
66
from kubernetes.dynamic.exceptions import UnprocessibleEntityError
77

8+
from utilities.architecture import is_s390x_cluster
89
from utilities.virt import VirtualMachineForTests, fedora_vm_body, running_vm
910

1011
pytestmark = [pytest.mark.post_upgrade, pytest.mark.sno, pytest.mark.arm64]
@@ -31,7 +32,7 @@ def vm_with_cpu_support(request, namespace, unprivileged_client):
3132
namespace=namespace.name,
3233
cpu_cores=request.param["cores"],
3334
cpu_sockets=request.param["sockets"],
34-
cpu_threads=request.param["threads"],
35+
cpu_threads=1 if is_s390x_cluster() else request.param["threads"],
3536
cpu_max_sockets=request.param["sockets"] or 1,
3637
body=fedora_vm_body(name=name),
3738
client=unprivileged_client,
@@ -45,37 +46,29 @@ def vm_with_cpu_support(request, namespace, unprivileged_client):
4546
[
4647
pytest.param(
4748
{"sockets": 2, "cores": 2, "threads": 2},
48-
marks=(pytest.mark.polarion("CNV-2820"), pytest.mark.gating, pytest.mark.conformance, pytest.mark.x86_64()),
49+
marks=(pytest.mark.polarion("CNV-2820"), pytest.mark.gating, pytest.mark.conformance),
4950
id="case1: 2 cores, 2 threads, 2 sockets",
5051
),
5152
pytest.param(
5253
{"sockets": None, "cores": 1, "threads": 2},
53-
marks=(pytest.mark.polarion("CNV-2823"), pytest.mark.x86_64()),
54+
marks=(pytest.mark.polarion("CNV-2823")),
5455
id="case2: 1 cores, 2 threads, no sockets",
5556
),
56-
pytest.param(
57-
{"sockets": 2, "cores": 2, "threads": 1},
58-
marks=(pytest.mark.gating(), pytest.mark.s390x()),
59-
id="case1: 2 cores, 1 threads, 2 sockets",
60-
),
61-
pytest.param(
62-
{"sockets": None, "cores": 1, "threads": 1},
63-
marks=[pytest.mark.s390x()],
64-
id="case2: 1 cores, 1 threads, no sockets",
65-
),
6657
pytest.param(
6758
{"sockets": 2, "cores": 1, "threads": None},
68-
marks=(pytest.mark.polarion("CNV-2822"), pytest.mark.x86_64(), pytest.mark.s390x()),
59+
marks=(pytest.mark.polarion("CNV-2822")),
6960
id="case3: 1 cores, no threads, 2 sockets",
7061
),
7162
pytest.param(
7263
{"sockets": None, "cores": 2, "threads": None},
73-
marks=(pytest.mark.polarion("CNV-2821"), pytest.mark.x86_64(), pytest.mark.s390x()),
64+
marks=(pytest.mark.polarion("CNV-2821")),
7465
id="case4: 2 cores, no threads, no sockets",
7566
),
7667
],
7768
indirect=True,
7869
)
70+
@pytest.mark.s390x
71+
@pytest.mark.x86_64
7972
def test_vm_with_cpu_support(vm_with_cpu_support):
8073
"""
8174
Test VM with cpu support

tests/virt/node/general/test_machinetype.py

Lines changed: 20 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,21 @@ def vm(request, cluster_cpu_model_scope_function, unprivileged_client, namespace
3434
yield vm
3535

3636

37+
@pytest.fixture()
38+
def vm_with_non_default_machine_type(request, unprivileged_client, namespace, non_default_machine_type):
39+
name = "vm-custom-machine-type"
40+
41+
with VirtualMachineForTests(
42+
name=name,
43+
namespace=namespace.name,
44+
body=fedora_vm_body(name=name),
45+
client=unprivileged_client,
46+
machine_type=non_default_machine_type,
47+
) as vm:
48+
running_vm(vm=vm, check_ssh_connectivity=False)
49+
yield vm
50+
51+
3752
@pytest.fixture()
3853
def updated_kubevirt_config_machine_type(
3954
request,
@@ -84,39 +99,13 @@ def migrated_vm(vm, machine_type_from_kubevirt_config):
8499
indirect=True,
85100
)
86101
def test_default_machine_type(machine_type_from_kubevirt_config, vm):
87-
# Workaround for s390x (https://github.com/kubevirt/kubevirt/issues/14953), as machine type missing in config and
88-
# hardcoded to s390_ccw_virtio in kubevirt code.
89-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
90-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
91-
else:
92-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
93-
validate_machine_type(
94-
vm=vm,
95-
expected_machine_type=machine_type_from_kubevirt_config,
96-
expected_libvirt_machine_type=expected_libvirt_machine_type,
97-
)
102+
validate_machine_type(vm=vm, expected_machine_type=machine_type_from_kubevirt_config)
98103

99104

100-
@pytest.mark.parametrize(
101-
"vm, expected, expected_libvirt",
102-
[
103-
pytest.param(
104-
{"vm_name": "pc-q35", "machine_type": MachineTypesNames.pc_q35_rhel7_6},
105-
MachineTypesNames.pc_q35_rhel7_6,
106-
MachineTypesNames.pc_q35_rhel7_6,
107-
marks=[pytest.mark.polarion("CNV-3311"), pytest.mark.x86_64()],
108-
),
109-
pytest.param(
110-
{"vm_name": "s390-ccw-virtio", "machine_type": MachineTypesNames.s390_ccw_virtio},
111-
MachineTypesNames.s390_ccw_virtio,
112-
MachineTypesNames.s390_ccw_virtio_rhel9_6,
113-
marks=[pytest.mark.s390x()],
114-
),
115-
],
116-
indirect=["vm"],
117-
)
118-
def test_vm_machine_type(vm, expected, expected_libvirt):
119-
validate_machine_type(vm=vm, expected_machine_type=expected, expected_libvirt_machine_type=expected_libvirt)
105+
@pytest.mark.polarion("CNV-3311")
106+
@pytest.mark.s390x
107+
def test_vm_machine_type(non_default_machine_type, vm_with_non_default_machine_type):
108+
validate_machine_type(vm=vm_with_non_default_machine_type, expected_machine_type=non_default_machine_type)
120109

121110

122111
@pytest.mark.parametrize(
@@ -138,15 +127,9 @@ def test_vm_machine_type(vm, expected, expected_libvirt):
138127
def test_migrate_vm(machine_type_from_kubevirt_config, vm):
139128
"""Migrate VM and check machine type is same"""
140129
migrate_vm_and_verify(vm=vm)
141-
# s390x: machine type missing in config (GH#14953). Same as above.
142-
if machine_type_from_kubevirt_config == MachineTypesNames.s390_ccw_virtio:
143-
expected_libvirt_machine_type = MachineTypesNames.s390_ccw_virtio_rhel9_6
144-
else:
145-
expected_libvirt_machine_type = machine_type_from_kubevirt_config
146130
validate_machine_type(
147131
vm=vm,
148132
expected_machine_type=machine_type_from_kubevirt_config,
149-
expected_libvirt_machine_type=expected_libvirt_machine_type,
150133
)
151134

152135

tests/virt/node/migration_and_maintenance/test_post_copy_migration.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
9797
"additional_labels": VM_LABEL,
9898
},
9999
id="RHEL-VM",
100-
marks=[pytest.mark.x86_64, pytest.mark.s390x],
100+
marks=[pytest.mark.x86_64],
101101
),
102102
pytest.param(
103103
{
@@ -120,12 +120,14 @@ def drained_node_with_hotplugged_vm(admin_client, hotplugged_vm):
120120
class TestPostCopyMigration:
121121
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::migrate_vm")
122122
@pytest.mark.polarion("CNV-11421")
123+
@pytest.mark.s390x
123124
def test_migrate_vm(self, hotplugged_vm, vm_background_process_id, migrated_hotplugged_vm):
124125
assert_migration_post_copy_mode(vm=hotplugged_vm)
125126
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
126127

127128
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::node_drain", depends=[f"{TESTS_CLASS_NAME}::migrate_vm"])
128129
@pytest.mark.polarion("CNV-11422")
130+
@pytest.mark.s390x
129131
def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_with_hotplugged_vm):
130132
assert_migration_post_copy_mode(vm=hotplugged_vm)
131133
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -135,6 +137,7 @@ def test_node_drain(self, hotplugged_vm, vm_background_process_id, drained_node_
135137
)
136138
@pytest.mark.dependency(name=f"{TESTS_CLASS_NAME}::hotplug_cpu", depends=[f"{TESTS_CLASS_NAME}::node_drain"])
137139
@pytest.mark.polarion("CNV-11423")
140+
@pytest.mark.s390x
138141
def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
139142
assert_guest_os_cpu_count(vm=hotplugged_vm, spec_cpu_amount=SIX_CPU_SOCKETS)
140143
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)
@@ -144,10 +147,6 @@ def test_hotplug_cpu(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_ba
144147
)
145148
@pytest.mark.dependency(depends=[f"{TESTS_CLASS_NAME}::hotplug_cpu"])
146149
@pytest.mark.polarion("CNV-11424")
147-
def test_hotplug_memory(self, request, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
148-
# Dynamically skip only this test when class is run with s390x params as memory hotplug is not supported
149-
if request.node.get_closest_marker("s390x"):
150-
pytest.skip("Skipping test_hotplug_memory for s390x")
151-
150+
def test_hotplug_memory(self, hotplugged_sockets_memory_guest, hotplugged_vm, vm_background_process_id):
152151
assert_guest_os_memory_amount(vm=hotplugged_vm, spec_memory_amount=SIX_GI_MEMORY)
153152
assert_same_pid_after_migration(orig_pid=vm_background_process_id, vm=hotplugged_vm)

tests/virt/node/migration_and_maintenance/test_vm_disk_load_with_migration.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -72,20 +72,6 @@ def get_disk_usage(ssh_exec):
7272
@pytest.mark.parametrize(
7373
"data_volume_scope_function, vm_with_fio",
7474
[
75-
pytest.param(
76-
{
77-
"dv_name": FEDORA_LATEST_OS,
78-
"image": FEDORA_LATEST.get("image_path"),
79-
"storage_class": py_config["default_storage_class"],
80-
"dv_size": FEDORA_LATEST.get("dv_size"),
81-
},
82-
{
83-
"vm_name": "fedora-load-vm",
84-
"template_labels": FEDORA_LATEST_LABELS,
85-
"cpu_threads": 2,
86-
},
87-
marks=[pytest.mark.polarion("CNV-4663"), pytest.mark.x86_64()],
88-
),
8975
pytest.param(
9076
{
9177
"dv_name": FEDORA_LATEST_OS,
@@ -98,11 +84,12 @@ def get_disk_usage(ssh_exec):
9884
"template_labels": FEDORA_LATEST_LABELS,
9985
"cpu_threads": 1,
10086
},
101-
marks=[pytest.mark.s390x()],
87+
marks=[pytest.mark.polarion("CNV-4663")],
10288
),
10389
],
10490
indirect=True,
10591
)
92+
@pytest.mark.s390x
10693
@pytest.mark.rwx_default_storage
10794
def test_fedora_vm_load_migration(vm_with_fio, running_fio_in_vm):
10895
LOGGER.info("Test migrate VM with disk load")

tests/virt/node/migration_and_maintenance/test_vm_memory_load_with_migration.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -58,17 +58,14 @@ class TestMigrationVMWithMemoryLoad:
5858
"vm_name": "fedora-vm-with-memory-load",
5959
"template_labels": FEDORA_LATEST_LABELS,
6060
"memory_guest": "4Gi",
61-
"cpu_cores": 2,
61+
"cpu_cores": 1,
6262
},
63-
marks=[
64-
pytest.mark.polarion("CNV-4661"),
65-
pytest.mark.x86_64(),
66-
pytest.mark.s390x(),
67-
],
63+
marks=pytest.mark.polarion("CNV-4661"),
6864
),
6965
],
7066
indirect=True,
7167
)
68+
@pytest.mark.s390x
7269
def test_fedora_vm_migrate_with_memory_load(
7370
self,
7471
vm_with_memory_load,

tests/virt/node/node_labeller/cpu_features/test_node_feature_discovery.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,6 @@ def updated_kubevirt_cpus(
5656
hco_namespace=hco_namespace,
5757
path=[OBSOLETE_CPU, cluster_common_node_cpu],
5858
value=True,
59-
timeout=120,
6059
)
6160
wait_for_kv_stabilize(admin_client=admin_client, hco_namespace=hco_namespace)
6261
yield
@@ -75,8 +74,8 @@ def node_label_checker(node_label_dict, label_list, dict_key):
7574

7675

7776
@pytest.mark.x86_64
78-
@pytest.mark.s390x
7977
@pytest.mark.polarion("CNV-2797")
78+
@pytest.mark.s390x
8079
def test_obsolete_cpus_in_node_labels(nodes_labels_dict, kubevirt_config):
8180
"""
8281
Test obsolete CPUs. Obsolete CPUs don't appear in node labels.

tests/virt/node/node_labeller/cpu_features/test_vm_with_cpu_flag.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,6 @@ def test_vm_with_cpu_flag_negative(cpu_flag_vm_negative):
6161
cpu_flag_vm_negative.vmi.wait_until_running(timeout=TIMEOUT_1MIN)
6262

6363

64-
@pytest.mark.gating
6564
@pytest.mark.x86_64
6665
@pytest.mark.s390x
6766
@pytest.mark.polarion("CNV-1269")

0 commit comments

Comments
 (0)