3 changes: 3 additions & 0 deletions data.py-dist
@@ -188,6 +188,9 @@ DEFAULT_SR = 'default'
# This setting affects VMs managed by the `imported_vm` fixture.
CACHE_IMPORTED_VM = False

# Default LINSTOR redundancy configuration for creating SRs.
LINSTOR_REDUNDANCY = 2
Contributor

I see that the value is decreased from 3 to 2? Could this be explained in the commit message?

Member Author

Not quite: this value is not decreased from 3 to 2. Before this change, the number of hosts was used as the LINSTOR redundancy, with a maximum value of 3. But yes, I agree, it's not a bad idea to add this info in the first commit.
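
In other words, a minimal sketch of the old versus new computation (the helper function names below are illustrative only; the PR itself only introduces LINSTOR_REDUNDANCY and the min(...) expression used by the linstor_redundancy fixture):

LINSTOR_REDUNDANCY = 2  # new default from data.py-dist

def old_redundancy(num_hosts: int) -> int:
    # Previous behaviour: redundancy followed the host count, capped at 3.
    return min(num_hosts, 3)

def new_redundancy(num_hosts: int, configured: int = LINSTOR_REDUNDANCY) -> int:
    # New behaviour: redundancy follows the host count, capped at the configured
    # LINSTOR_REDUNDANCY, as the linstor_redundancy fixture does.
    return min(num_hosts, configured)

# A 3-host pool previously got redundancy 3, now 2 by default;
# pools of 1 or 2 hosts are unchanged.
assert old_redundancy(3) == 3 and new_redundancy(3) == 2
assert old_redundancy(2) == 2 and new_redundancy(2) == 2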


# Default NFS device config:
NFS_DEVICE_CONFIG: dict[str, str] = {
# 'server': '10.0.0.2', # URL/Hostname of NFS server
3 changes: 3 additions & 0 deletions lib/host.py
@@ -89,6 +89,9 @@ def __init__(self, pool: Pool, hostname_or_ip: str):
def __str__(self) -> str:
return self.hostname_or_ip

def name(self) -> str:
return self.param_get('name-label')

@overload
def ssh(self, cmd: str, *, check: bool = True, simple_output: Literal[True] = True,
suppress_fingerprint_warnings: bool = True, background: Literal[False] = False,
12 changes: 11 additions & 1 deletion tests/storage/linstor/conftest.py
@@ -9,6 +9,11 @@

import lib.commands as commands

try:
from data import LINSTOR_REDUNDANCY # type: ignore
except ImportError:
LINSTOR_REDUNDANCY = 2

# explicit import for package-scope fixtures
from pkgfixtures import pool_with_saved_yum_state

@@ -141,17 +146,22 @@ def remove_linstor(host: Host) -> None:
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(remove_linstor, pool.hosts)

@pytest.fixture(scope='package')
def linstor_redundancy(pool_with_linstor: Pool) -> int:
return min(len(pool_with_linstor.hosts), LINSTOR_REDUNDANCY)

@pytest.fixture(scope='package')
def linstor_sr(
pool_with_linstor: Pool,
linstor_redundancy: int,
provisioning_type: str,
storage_pool_name: str,
lvm_disks: None,
_linstor_config: LinstorConfig
) -> Generator[SR, None, None]:
sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
'group-name': storage_pool_name,
'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
'redundancy': str(linstor_redundancy),
'provisioning': provisioning_type
}, shared=True)
yield sr
52 changes: 34 additions & 18 deletions tests/storage/linstor/test_linstor_sr.py
@@ -183,32 +183,48 @@ def _ensure_resource_remain_diskless(
) -> None:
diskfuls = _get_diskful_hosts(host, controller_option, sr_group_name, vdi_uuid)
for diskless_host in diskless:
assert diskless_host.param_get("name-label").lower() not in diskfuls
assert diskless_host.name().lower() not in diskfuls

class TestLinstorDisklessResource:
@pytest.mark.small_vm
def test_diskless_kept(self, host: Host, linstor_sr: SR, vm_on_linstor_sr: VM, storage_pool_name: str) -> None:
vm = vm_on_linstor_sr
vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
vdi_uuid = vdi_uuids[0]
assert vdi_uuid is not None
def test_diskless_kept(
self, host: Host, linstor_sr: SR, linstor_redundancy: int, vm_on_linstor_sr: VM, storage_pool_name: str
) -> None:
if len(linstor_sr.pool.hosts) <= linstor_redundancy:
pytest.skip("This test requires at least one DRBD diskless")

# 1. Prepare options.
controller_option = "--controllers="
for member in host.pool.hosts:
controller_option += f"{member.hostname_or_ip},"

sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")
diskfuls = _get_diskful_hosts(host, controller_option, sr_group_name, vdi_uuid)
diskless = []
for member in host.pool.hosts:
if member.param_get("name-label").lower() not in diskfuls:
diskless += [member]
assert diskless

# Start VM on host with diskless resource
vm.start(on=diskless[0].uuid)
vm.wait_for_os_booted()
_ensure_resource_remain_diskless(host, controller_option, sr_group_name, vdi_uuid, diskless)
# 2. Get VM VDI.
vm = vm_on_linstor_sr
vdi = vm.vdis[0]

vm.shutdown(verify=True)
_ensure_resource_remain_diskless(host, controller_option, sr_group_name, vdi_uuid, diskless)
# 3. Create a snapshot to ensure the VDI cannot be coalesced during the diskless checks.
# To be clear: if a coalesce is executed on the leaf, the VDI path is modified,
# and we must prevent this situation, otherwise we can't compare the diskless state
# between the VM running and stopped.
snap = vdi.snapshot()

try:
# 4. Fetch DRBD diskless.
diskfuls = _get_diskful_hosts(host, controller_option, sr_group_name, vdi.uuid)
diskless = []
for member in host.pool.hosts:
if member.name().lower() not in diskfuls:
diskless += [member]
assert diskless

# 5. Verify diskless state after VM boot and shutdown.
vm.start(on=diskless[0].uuid)
vm.wait_for_os_booted()
_ensure_resource_remain_diskless(host, controller_option, sr_group_name, vdi.uuid, diskless)

vm.shutdown(verify=True)
_ensure_resource_remain_diskless(host, controller_option, sr_group_name, vdi.uuid, diskless)
finally:
snap.destroy()