diff --git a/lib/common.py b/lib/common.py index 101aa19a8..05df77586 100644 --- a/lib/common.py +++ b/lib/common.py @@ -22,6 +22,11 @@ if TYPE_CHECKING: import lib.host +KiB = 2**10 +MiB = KiB**2 +GiB = KiB**3 +TiB = KiB**4 + T = TypeVar("T") HostAddress: TypeAlias = str diff --git a/lib/sr.py b/lib/sr.py index dffa2c833..24c22a919 100644 --- a/lib/sr.py +++ b/lib/sr.py @@ -3,6 +3,7 @@ import lib.commands as commands from lib.common import ( + GiB, prefix_object_name, safe_split, strtobool, @@ -166,7 +167,7 @@ def get_type(self) -> str: self._type = self.pool.master.xe("sr-param-get", {"uuid": self.uuid, "param-name": "type"}) return self._type - def create_vdi(self, name_label: str, virtual_size: int = 64, image_format: Optional[str] = None) -> VDI: + def create_vdi(self, name_label: str, virtual_size: int = 1 * GiB, image_format: Optional[str] = None) -> VDI: logging.info("Create VDI %r on SR %s", name_label, self.uuid) args = { 'name-label': prefix_object_name(name_label), diff --git a/lib/vm.py b/lib/vm.py index 7ae585702..eaf310912 100644 --- a/lib/vm.py +++ b/lib/vm.py @@ -309,11 +309,12 @@ def connect_vdi(self, vdi: VDI, device: str = "autodetect") -> str: "vm-uuid": self.uuid, "device": device, }) - try: - self.host.xe("vbd-plug", {"uuid": vbd_uuid}) - except commands.SSHCommandFailed: - self.host.xe("vbd-destroy", {"uuid": vbd_uuid}) - raise + if self.is_running(): + try: + self.host.xe("vbd-plug", {"uuid": vbd_uuid}) + except commands.SSHCommandFailed: + self.host.xe("vbd-destroy", {"uuid": vbd_uuid}) + raise self.vdis.append(vdi) @@ -321,18 +322,19 @@ def connect_vdi(self, vdi: VDI, device: str = "autodetect") -> str: def disconnect_vdi(self, vdi: VDI): logging.info(f"<< Unplugging VDI {vdi.uuid} from VM {self.uuid}") - assert vdi in self.vdis, "VDI {vdi.uuid} not in VM {self.uuid} VDI list" + assert vdi in self.vdis, f"VDI {vdi.uuid} not in VM {self.uuid} VDI list" vbd_uuid = self.host.xe("vbd-list", { "vdi-uuid": vdi.uuid, "vm-uuid": self.uuid }, 
minimal=True) - try: - self.host.xe("vbd-unplug", {"uuid": vbd_uuid}) - except commands.SSHCommandFailed as e: - if e.stdout == f"The device is not currently attached\ndevice: {vbd_uuid}": - logging.info(f"VBD {vbd_uuid} already unplugged") - else: - raise + if self.is_running(): + try: + self.host.xe("vbd-unplug", {"uuid": vbd_uuid}) + except commands.SSHCommandFailed as e: + if e.stdout == f"The device is not currently attached\ndevice: {vbd_uuid}": + logging.info(f"VBD {vbd_uuid} already unplugged") + else: + raise self.host.xe("vbd-destroy", {"uuid": vbd_uuid}) self.vdis.remove(vdi) diff --git a/tests/storage/__init__.py b/tests/storage/__init__.py index 245347b90..f6c7a19f6 100644 --- a/tests/storage/__init__.py +++ b/tests/storage/__init__.py @@ -1,6 +1,9 @@ from .storage import ( cold_migration_then_come_back, + install_randstream, live_storage_migration_then_come_back, + operation_on_vdi, try_to_create_sr_with_missing_device, vdi_is_open, + wait_for_vdi_coalesce, ) diff --git a/tests/storage/coalesce/__init__.py b/tests/storage/coalesce/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/storage/coalesce/conftest.py b/tests/storage/coalesce/conftest.py deleted file mode 100644 index dce94cabc..000000000 --- a/tests/storage/coalesce/conftest.py +++ /dev/null @@ -1,58 +0,0 @@ -import pytest - -import logging - -from lib.vdi import VDI - -MAX_LENGTH = 1 * 1024 * 1024 * 1024 # 1GiB - -@pytest.fixture(scope="module") -def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): - sr = local_sr_on_hostA1 - vdi = sr.create_vdi("testVDI", MAX_LENGTH, image_format=image_format) - logging.info(">> Created VDI {} of type {}".format(vdi.uuid, image_format)) - - yield vdi - - logging.info("<< Destroying VDI {}".format(vdi.uuid)) - vdi.destroy() - -@pytest.fixture(scope="module") -def vdi_with_vbd_on_dom0(host, vdi_on_local_sr): - dom0 = host.get_dom0_vm() - dom0.connect_vdi(vdi_on_local_sr) - - yield vdi_on_local_sr - - 
dom0.disconnect_vdi(vdi_on_local_sr) -@pytest.fixture(scope="function") -def data_file_on_host(host): - filename = "/root/data.bin" - logging.info(f">> Creating data file {filename} on host") - size = 1 * 1024 * 1024 # 1MiB - assert size <= MAX_LENGTH, "Size of the data file bigger than the VDI size" - - host.ssh(["dd", "if=/dev/urandom", f"of={filename}", f"bs={size}", "count=1"]) - - yield filename - - logging.info("<< Deleting data file") - host.ssh(["rm", filename]) -@pytest.fixture(scope="module") -def tapdev(local_sr_on_hostA1, vdi_with_vbd_on_dom0): - """ - A tapdev is a blockdevice allowing access to a VDI from the Dom0. - - It is usually used to give access to the VDI to Qemu for emulating devices - before PV driver are loaded in the guest. - """ - sr_uuid = local_sr_on_hostA1.uuid - vdi_uuid = vdi_with_vbd_on_dom0.uuid - yield f"/dev/sm/backend/{sr_uuid}/{vdi_uuid}" -@pytest.fixture(scope="package") -def host_with_xxd(host): - host.yum_install(['vim-common']) - return host diff --git a/tests/storage/coalesce/test_coalesce.py b/tests/storage/coalesce/test_coalesce.py deleted file mode 100644 index e97bf7663..000000000 --- a/tests/storage/coalesce/test_coalesce.py +++ /dev/null @@ -1,49 +0,0 @@ -import pytest - -import logging - -from .utils import compare_data, copy_data_to_tapdev, operation_on_vdi, wait_for_vdi_coalesce - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from lib.host import Host - from lib.vdi import VDI - -class Test: - def test_write_data(self, host_with_xxd: "Host", tapdev: str, data_file_on_host: str): - length = 1 * 1024 * 1024 - offset = 0 - - logging.info("Copying data to tapdev") - copy_data_to_tapdev(host_with_xxd, data_file_on_host, tapdev, offset, length) - - logging.info("Comparing data to tapdev") - assert compare_data(host_with_xxd, tapdev, data_file_on_host, offset, length) - - @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) - def test_coalesce( - self, - host_with_xxd: "Host", - tapdev: str, 
vdi_with_vbd_on_dom0: "VDI", - data_file_on_host: str, - vdi_op - ): - vdi = vdi_with_vbd_on_dom0 - vdi_uuid = vdi.uuid - length = 1 * 1024 * 1024 - offset = 0 - - new_vdi = operation_on_vdi(host_with_xxd, vdi_uuid, vdi_op) - - logging.info("Copying data to tapdev") - copy_data_to_tapdev(host_with_xxd, data_file_on_host, tapdev, offset, length) - - logging.info(f"Removing VDI {vdi_op}") - host_with_xxd.xe("vdi-destroy", {"uuid": new_vdi}) - - wait_for_vdi_coalesce(vdi) - - logging.info("Comparing data to tapdev") - assert compare_data(host_with_xxd, tapdev, data_file_on_host, offset, length) diff --git a/tests/storage/coalesce/utils.py b/tests/storage/coalesce/utils.py deleted file mode 100644 index babe75900..000000000 --- a/tests/storage/coalesce/utils.py +++ /dev/null @@ -1,50 +0,0 @@ -import logging - -from lib.common import wait_for_not -from lib.host import Host -from lib.vdi import VDI - -from typing import Literal - -def wait_for_vdi_coalesce(vdi: VDI): - # It is necessary to wait a long time because the GC can be paused for more than 5 minutes. - # And it is also necessary to allow a sufficiently long merge time which depends on the amount of data. 
- wait_for_not(lambda: vdi.get_parent(), msg="Waiting for coalesce", timeout_secs=7 * 60) - logging.info("Coalesce done") - -def copy_data_to_tapdev(host: Host, data_file: str, tapdev: str, offset: int, length: int): - # if offset == 0: - # off = "0" - # else: - # off = f"{offset}B" # Doesn't work with `dd` version of XCP-ng 8.3 - - bs = 1 - off = int(offset / bs) - count = length / bs - count += length % bs - count = int(count) - cmd = ["dd", f"if={data_file}", f"of={tapdev}", f"bs={bs}", f"seek={off}", f"count={count}"] - host.ssh(cmd) - -def get_data(host: Host, file: str, offset: int, length: int, checksum: bool = False) -> str: - cmd = ["xxd", "-p", "-seek", str(offset), "-len", str(length), file] - if checksum: - cmd = cmd + ["|", "sha256sum"] - return host.ssh(cmd, check=True) - -def get_hashed_data(host: Host, file: str, offset: int, length: int): - return get_data(host, file, offset, length, True).split()[0] - -def operation_on_vdi(host: Host, vdi_uuid: str, vdi_op: Literal["snapshot", "clone"]) -> str: - new_vdi = host.xe(f"vdi-{vdi_op}", {"uuid": vdi_uuid}) - logging.info(f"{vdi_op.capitalize()} VDI {vdi_uuid}: {new_vdi}") - return new_vdi - -def compare_data(host: Host, tapdev: str, data_file: str, offset: int, length: int) -> bool: - logging.info("Getting data from VDI and file") - vdi_checksum = get_hashed_data(host, tapdev, offset, length) - file_checksum = get_hashed_data(host, data_file, 0, length) - logging.info(f"VDI: {vdi_checksum}") - logging.info(f"FILE: {file_checksum}") - - return vdi_checksum == file_checksum diff --git a/tests/storage/conftest.py b/tests/storage/conftest.py index b62b97cdb..08bfb716c 100644 --- a/tests/storage/conftest.py +++ b/tests/storage/conftest.py @@ -1,3 +1,14 @@ +import pytest + +from tests.storage import install_randstream + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from lib.vdi import VDI + from lib.vm import VM + + def pytest_collection_modifyitems(config, items): # modify ordering so that ext is 
always tested first, # before more complex storage drivers @@ -5,3 +16,10 @@ def pytest_collection_modifyitems(config, items): if "_ext_" in item.path.name: items.remove(item) items.insert(0, item) + +@pytest.fixture(scope='module') +def storage_test_vm(unix_vm: 'VM'): + unix_vm.start() + unix_vm.wait_for_vm_running_and_ssh_up() + install_randstream(unix_vm) + yield unix_vm diff --git a/tests/storage/ext/conftest.py b/tests/storage/ext/conftest.py index d0fb452a9..6967b04f9 100644 --- a/tests/storage/ext/conftest.py +++ b/tests/storage/ext/conftest.py @@ -4,11 +4,10 @@ import logging -from typing import TYPE_CHECKING, Generator +from lib.host import Host +from lib.sr import SR -if TYPE_CHECKING: - from lib.host import Host - from lib.sr import SR +from typing import Generator @pytest.fixture(scope='package') def ext_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[SR]: @@ -20,7 +19,7 @@ def ext_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]] sr.destroy() @pytest.fixture(scope='module') -def vdi_on_ext_sr(ext_sr): +def vdi_on_ext_sr(ext_sr: SR): vdi = ext_sr.create_vdi('EXT-local-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/ext/test_ext_sr.py b/tests/storage/ext/test_ext_sr.py index 572102b66..3f4ae9cf9 100644 --- a/tests/storage/ext/test_ext_sr.py +++ b/tests/storage/ext/test_ext_sr.py @@ -7,13 +7,15 @@ from lib.commands import SSHCommandFailed from lib.common import vm_image, wait_for from lib.fistpoint import FistPoint +from lib.host import Host from lib.vdi import VDI -from tests.storage import try_to_create_sr_with_missing_device, vdi_is_open - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from lib.host import Host +from lib.vm import VM +from tests.storage import ( + operation_on_vdi, + try_to_create_sr_with_missing_device, + vdi_is_open, + wait_for_vdi_coalesce, +) # Requirements: # - one XCP-ng host with an additional unused disk for the SR @@ -66,6 +68,28 @@ def 
test_snapshot(self, vm_on_ext_sr): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, storage_test_vm: VM, vdi_on_ext_sr: VDI, vdi_op): + vm = storage_test_vm + vdi = vdi_on_ext_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.small_vm diff --git a/tests/storage/lvm/conftest.py b/tests/storage/lvm/conftest.py index fb6845f88..01ec3c689 100644 --- a/tests/storage/lvm/conftest.py +++ b/tests/storage/lvm/conftest.py @@ -4,11 +4,10 @@ import logging -from typing import TYPE_CHECKING, Generator +from lib.host import Host +from lib.sr import SR -if TYPE_CHECKING: - from lib.host import Host - from lib.sr import SR +from typing import Generator @pytest.fixture(scope='package') def lvm_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[SR]: @@ -20,7 +19,7 @@ def lvm_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]] sr.destroy() @pytest.fixture(scope='module') -def vdi_on_lvm_sr(lvm_sr): +def vdi_on_lvm_sr(lvm_sr: SR): vdi = lvm_sr.create_vdi('LVM-local-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/lvm/test_lvm_sr.py b/tests/storage/lvm/test_lvm_sr.py index 63aa79c88..2f70d1353 100644 --- a/tests/storage/lvm/test_lvm_sr.py +++ b/tests/storage/lvm/test_lvm_sr.py @@ -7,13 +7,15 @@ from lib.commands import 
SSHCommandFailed from lib.common import vm_image, wait_for from lib.fistpoint import FistPoint +from lib.host import Host from lib.vdi import VDI -from tests.storage import try_to_create_sr_with_missing_device, vdi_is_open - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from lib.host import Host +from lib.vm import VM +from tests.storage import ( + operation_on_vdi, + try_to_create_sr_with_missing_device, + vdi_is_open, + wait_for_vdi_coalesce, +) # Requirements: # - one XCP-ng host with an additional unused disk for the SR @@ -118,6 +120,28 @@ def test_failing_resize_on_inflate_after_setSizePhys(self, host, lvm_sr, vm_on_l assert lvinflate not in host.lvs(f"VG_XenStorage-{lvm_sr.uuid}"), \ "Inflate journal still exist following the scan" + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, storage_test_vm: VM, vdi_on_lvm_sr: VDI, vdi_op): + vm = storage_test_vm + vdi = vdi_on_lvm_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). 
@pytest.mark.reboot diff --git a/tests/storage/lvmoiscsi/conftest.py b/tests/storage/lvmoiscsi/conftest.py index ee37309fc..89c0075a8 100644 --- a/tests/storage/lvmoiscsi/conftest.py +++ b/tests/storage/lvmoiscsi/conftest.py @@ -3,6 +3,7 @@ import logging from lib import config +from lib.sr import SR @pytest.fixture(scope='package') def lvmoiscsi_device_config(): @@ -17,7 +18,7 @@ def lvmoiscsi_sr(host, lvmoiscsi_device_config): sr.destroy() @pytest.fixture(scope='module') -def vdi_on_lvmoiscsi_sr(lvmoiscsi_sr): +def vdi_on_lvmoiscsi_sr(lvmoiscsi_sr: SR): vdi = lvmoiscsi_sr.create_vdi('lvmoiscsi-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/lvmoiscsi/test_lvmoiscsi_sr.py b/tests/storage/lvmoiscsi/test_lvmoiscsi_sr.py index 0fcbd5212..e5014d9df 100644 --- a/tests/storage/lvmoiscsi/test_lvmoiscsi_sr.py +++ b/tests/storage/lvmoiscsi/test_lvmoiscsi_sr.py @@ -1,7 +1,9 @@ import pytest from lib.common import vm_image, wait_for -from tests.storage import vdi_is_open +from lib.vdi import VDI +from lib.vm import VM +from tests.storage import operation_on_vdi, vdi_is_open, wait_for_vdi_coalesce # Requirements: # - one XCP-ng host >= 8.2 @@ -51,6 +53,28 @@ def test_snapshot(self, vm_on_lvmoiscsi_sr): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, storage_test_vm: 'VM', vdi_on_lvmoiscsi_sr: 'VDI', vdi_op): + vm = storage_test_vm + vdi = vdi_on_lvmoiscsi_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + 
finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.reboot diff --git a/tests/storage/nfs/conftest.py b/tests/storage/nfs/conftest.py index 163108531..2d02e0ea5 100644 --- a/tests/storage/nfs/conftest.py +++ b/tests/storage/nfs/conftest.py @@ -3,6 +3,7 @@ import logging from lib import config +from lib.sr import SR # --- Dispatch fixture for NFS versions ---------------------------------------- @@ -25,7 +26,7 @@ def nfs_sr(host, nfs_device_config): sr.destroy() @pytest.fixture(scope='module') -def vdi_on_nfs_sr(nfs_sr): +def vdi_on_nfs_sr(nfs_sr: SR): vdi = nfs_sr.create_vdi('NFS-VDI-test') yield vdi vdi.destroy() @@ -53,7 +54,7 @@ def nfs4_sr(host, nfs4_device_config): sr.destroy() @pytest.fixture(scope='module') -def vdi_on_nfs4_sr(nfs4_sr): +def vdi_on_nfs4_sr(nfs4_sr: SR): vdi = nfs4_sr.create_vdi('NFS4-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/nfs/test_nfs_sr.py b/tests/storage/nfs/test_nfs_sr.py index 6b1790391..3d8eb7075 100644 --- a/tests/storage/nfs/test_nfs_sr.py +++ b/tests/storage/nfs/test_nfs_sr.py @@ -1,7 +1,11 @@ +from __future__ import annotations + import pytest from lib.common import vm_image, wait_for -from tests.storage import vdi_is_open +from lib.vdi import VDI +from lib.vm import VM +from tests.storage import operation_on_vdi, vdi_is_open, wait_for_vdi_coalesce # Requirements: # - one XCP-ng host >= 8.0 with an additional unused disk for the SR @@ -55,6 +59,29 @@ def test_snapshot(self, dispatch_nfs): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize('dispatch_nfs', ['vdi_on_nfs_sr', 'vdi_on_nfs4_sr'], indirect=True) + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, storage_test_vm: VM, dispatch_nfs: VDI, vdi_op): + vm = storage_test_vm + vdi = dispatch_nfs + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + 
vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.reboot diff --git a/tests/storage/storage.py b/tests/storage/storage.py index 0f1914638..036a77887 100644 --- a/tests/storage/storage.py +++ b/tests/storage/storage.py @@ -1,6 +1,16 @@ +import logging + from lib.commands import SSHCommandFailed -from lib.common import strtobool, wait_for +from lib.common import strtobool, wait_for, wait_for_not from lib.sr import SR +from lib.vdi import VDI + +from typing import TYPE_CHECKING, Literal + +if TYPE_CHECKING: + from lib.host import Host + from lib.vm import VM + def try_to_create_sr_with_missing_device(sr_type, label, host): try: @@ -72,3 +82,21 @@ def get_xapi_session(): 'vdiUuid': vdi.uuid, 'srRef': master.execute_script(get_sr_ref, shebang='python') })) + + +def operation_on_vdi(host: 'Host', vdi_uuid: str, vdi_op: Literal["snapshot", "clone"]) -> VDI: + new_vdi = host.xe(f"vdi-{vdi_op}", {"uuid": vdi_uuid}) + logging.info(f"{vdi_op.capitalize()} VDI {vdi_uuid}: {new_vdi}") + return VDI(new_vdi, host=host) + + +def wait_for_vdi_coalesce(vdi: VDI): + # It is necessary to wait a long time because the GC can be paused for more than 5 minutes. + # And it is also necessary to allow a sufficiently long merge time which depends on the amount of data. 
+ wait_for_not(lambda: vdi.get_parent(), msg="Waiting for coalesce", timeout_secs=7 * 60) + logging.info("Coalesce done") + + +def install_randstream(vm: 'VM'): + logging.debug("Installing randstream") + vm.ssh("wget -nv https://github.com/xcp-ng/randstream/releases/download/0.3.1/randstream-0.3.1-x86_64-unknown-linux-musl.tar.gz -O - | tar -xzC /usr/bin/ ./randstream") # noqa: E501 diff --git a/tests/storage/xfs/conftest.py b/tests/storage/xfs/conftest.py index 806a5a8cb..206a670f1 100644 --- a/tests/storage/xfs/conftest.py +++ b/tests/storage/xfs/conftest.py @@ -4,11 +4,10 @@ import logging -from typing import TYPE_CHECKING, Generator +from lib.host import Host +from lib.sr import SR -if TYPE_CHECKING: - from lib.host import Host - from lib.sr import SR +from typing import Generator @pytest.fixture(scope='package') def host_with_xfsprogs(host): @@ -31,7 +30,7 @@ def xfs_sr(unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]], host_with_ sr.destroy() @pytest.fixture(scope='module') -def vdi_on_xfs_sr(xfs_sr): +def vdi_on_xfs_sr(xfs_sr: SR): vdi = xfs_sr.create_vdi('XFS-local-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/xfs/test_xfs_sr.py b/tests/storage/xfs/test_xfs_sr.py index 2d567bee7..4135ff3de 100644 --- a/tests/storage/xfs/test_xfs_sr.py +++ b/tests/storage/xfs/test_xfs_sr.py @@ -7,12 +7,10 @@ from lib.commands import SSHCommandFailed from lib.common import vm_image, wait_for -from tests.storage import vdi_is_open - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from lib.host import Host +from lib.host import Host +from lib.vdi import VDI +from lib.vm import VM +from tests.storage import operation_on_vdi, vdi_is_open, wait_for_vdi_coalesce # Requirements: # - one XCP-ng host >= 8.2 with an additional unused disk for the SR @@ -84,6 +82,28 @@ def test_snapshot(self, vm_on_xfs_sr): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, 
storage_test_vm: VM, vdi_on_xfs_sr: VDI, vdi_op): + vm = storage_test_vm + vdi = vdi_on_xfs_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.reboot diff --git a/tests/storage/zfs/conftest.py b/tests/storage/zfs/conftest.py index 2cd61925b..012b7c320 100644 --- a/tests/storage/zfs/conftest.py +++ b/tests/storage/zfs/conftest.py @@ -2,6 +2,8 @@ import logging +from lib.sr import SR + # Explicitly import package-scoped fixtures (see explanation in pkgfixtures.py) from pkgfixtures import host_with_saved_yum_state, sr_disk_wiped @@ -36,7 +38,7 @@ def zfs_sr(host, zpool_vol0): sr.destroy() @pytest.fixture(scope='module') -def vdi_on_zfs_sr(zfs_sr): +def vdi_on_zfs_sr(zfs_sr: SR): vdi = zfs_sr.create_vdi('ZFS-local-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/zfs/test_zfs_sr.py b/tests/storage/zfs/test_zfs_sr.py index 64ac862d6..3827590e9 100755 --- a/tests/storage/zfs/test_zfs_sr.py +++ b/tests/storage/zfs/test_zfs_sr.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import logging @@ -5,7 +7,9 @@ from lib.commands import SSHCommandFailed from lib.common import vm_image, wait_for -from tests.storage import vdi_is_open +from lib.vdi import VDI +from lib.vm import VM +from tests.storage import operation_on_vdi, vdi_is_open, wait_for_vdi_coalesce from .conftest import POOL_NAME, POOL_PATH @@ -72,6 +76,28 @@ def test_snapshot(self, vm_on_zfs_sr): 
finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot", "clone"]) + def test_coalesce(self, storage_test_vm: VM, vdi_on_zfs_sr: VDI, vdi_op): + vm = storage_test_vm + vdi = vdi_on_zfs_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.reboot diff --git a/tests/storage/zfsvol/conftest.py b/tests/storage/zfsvol/conftest.py index f06cee5a9..95e3321f2 100644 --- a/tests/storage/zfsvol/conftest.py +++ b/tests/storage/zfsvol/conftest.py @@ -1,7 +1,11 @@ +from __future__ import annotations + import pytest import logging +from lib.sr import SR + # Explicitly import package-scoped fixtures (see explanation in pkgfixtures.py) from pkgfixtures import host_with_saved_yum_state_toolstack_restart, sr_disk_wiped @@ -23,7 +27,7 @@ def zfsvol_sr(host, sr_disk_wiped, host_with_zfsvol): host.ssh(["wipefs", "-a", device]) @pytest.fixture(scope='module') -def vdi_on_zfsvol_sr(zfsvol_sr): +def vdi_on_zfsvol_sr(zfsvol_sr: SR): vdi = zfsvol_sr.create_vdi('ZFS-local-VDI-test') yield vdi vdi.destroy() diff --git a/tests/storage/zfsvol/test_zfsvol_sr.py b/tests/storage/zfsvol/test_zfsvol_sr.py index 3f03edc78..613cc4c9d 100755 --- a/tests/storage/zfsvol/test_zfsvol_sr.py +++ b/tests/storage/zfsvol/test_zfsvol_sr.py @@ -1,11 +1,11 @@ -import pytest +from __future__ import annotations -import logging -import time +import pytest -from lib.commands import 
SSHCommandFailed from lib.common import vm_image, wait_for -from tests.storage import vdi_is_open +from lib.vdi import VDI +from lib.vm import VM +from tests.storage import operation_on_vdi, wait_for_vdi_coalesce # Requirements: # - one XCP-ng host >= 8.3 with an additional unused disk for the SR @@ -58,6 +58,28 @@ def test_snapshot(self, vm_on_zfsvol_sr): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + @pytest.mark.parametrize("vdi_op", ["snapshot"]) # "clone" requires a snapshot + def test_coalesce(self, storage_test_vm: VM, vdi_on_zfsvol_sr: VDI, vdi_op): + vm = storage_test_vm + vdi = vdi_on_zfsvol_sr + vm.connect_vdi(vdi, 'xvdb') + new_vdi = None + try: + vm.ssh("randstream generate -v /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum 65280014 /dev/xvdb") + new_vdi = operation_on_vdi(vm.host, vdi.uuid, vdi_op) + vm.ssh("randstream generate -v --seed 1 --size 128Mi /dev/xvdb") + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + new_vdi.destroy() + new_vdi = None + wait_for_vdi_coalesce(vdi) + vm.ssh("randstream validate -v --expected-checksum ad2ca9af /dev/xvdb") + finally: + vm.disconnect_vdi(vdi) + if new_vdi is not None: + new_vdi.destroy() + # *** tests with reboots (longer tests). @pytest.mark.reboot