Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions tests/integration/templates/bootstrap-jumbo.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
cluster-config:
network:
enabled: true

annotations:
k8sd/v1alpha1/cilium/devices: "eth1"
139 changes: 128 additions & 11 deletions tests/integration/tests/test_networking.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,17 +282,6 @@ def test_jumbo(instances: List[harness.Instance]):
), "Expected at least 45 packets out of 50 to be received in running ping"


def get_pod_ip(instance: harness.Instance, pod_name, namespace="default"):
result = instance.exec(
["k8s", "kubectl", "get", "pod", pod_name, "-n", namespace, "-o", "json"],
capture_output=True,
text=True,
check=True,
)
pod_info = json.loads(result.stdout)
return pod_info["status"]["podIP"]


@pytest.mark.node_count(2)
@pytest.mark.disable_k8s_bootstrapping()
@pytest.mark.network_type("dualnic")
Expand Down Expand Up @@ -378,3 +367,131 @@ def test_dual_nic(instances: List[harness.Instance]):
)

assert number_of_devices.stdout.strip() == "2"


@pytest.mark.node_count(2)
@pytest.mark.disable_k8s_bootstrapping()
@pytest.mark.network_type("dualnic")
@pytest.mark.tags(tags.NIGHTLY)
@pytest.mark.skipif(
    config.SUBSTRATE == "multipass", reason="Not implemented for multipass"
)
def test_dual_nic_with_jumbo(instances: List[harness.Instance]):
    """Verify pod-to-pod traffic is routed over the jumbo-frame NIC.

    Bootstraps a two-node cluster bound to the second NIC (eth1, MTU 9000),
    schedules an nginx pod on the worker and a netshoot pod on the control
    plane, then pings the nginx pod with 8000-byte unfragmentable packets.
    The ping can only succeed if traffic flows over the jumbo NIC.
    """
    cp_instance = instances[0]
    worker_instance = instances[1]

    dual_nic_bootstrap_config = (
        config.MANIFESTS_DIR / "bootstrap-jumbo.yaml"
    ).read_text()

    cp_second_nic_ip = util.get_device_ip(cp_instance, "eth1")
    worker_second_nic_ip = util.get_device_ip(worker_instance, "eth1")

    # Bind the cluster to the second (jumbo) NIC on both nodes.
    cp_instance.exec(
        ["k8s", "bootstrap", "--file", "-", "--address", cp_second_nic_ip],
        input=str.encode(dual_nic_bootstrap_config),
    )

    join_token_worker = util.get_join_token(cp_instance, worker_instance, "--worker")
    worker_instance.exec(
        ["k8s", "join-cluster", join_token_worker, "--address", worker_second_nic_ip]
    )

    util.wait_until_k8s_ready(cp_instance, instances)
    util.wait_for_network(cp_instance)

    # Role labels drive the nodeSelectors used below so the two pods land
    # on different nodes (forcing cross-node traffic).
    util.set_node_labels(cp_instance, cp_instance.id, {"kubernetes.io/role": "master"})
    util.set_node_labels(
        cp_instance, worker_instance.id, {"kubernetes.io/role": "worker"}
    )

    manifest = MANIFESTS_DIR / "nginx-sticky-pod.yaml"
    cp_instance.exec(
        ["k8s", "kubectl", "apply", "-f", "-"],
        input=manifest.read_bytes(),
    )

    util.stubbornly(retries=3, delay_s=1).on(cp_instance).exec(
        [
            "k8s",
            "kubectl",
            "wait",
            "--for=condition=ready",
            "pod",
            "nginx",
            "--timeout",
            "180s",
        ]
    )

    # make sure the netshoot pod is scheduled on the control plane node
    # while the nginx is scheduled on the worker node
    cp_instance.exec(
        [
            "k8s",
            "kubectl",
            "run",
            "netshoot",
            "--image=ghcr.io/nicolaka/netshoot:v0.14",
            "--restart=Never",
            "--overrides",
            '{"spec": {"nodeSelector": {"kubernetes.io/role": "master"}}}',
            "--",
            "sleep",
            "3600",
        ],
    )

    util.stubbornly(retries=3, delay_s=1).on(cp_instance).exec(
        [
            "k8s",
            "kubectl",
            "wait",
            "--for=condition=ready",
            "pod",
            "-l",
            "run=netshoot",
            "--timeout",
            "180s",
        ]
    )

    nginx_pod_ip = get_pod_ip(cp_instance, "nginx")
    # Exec into netshoot and ping nginx pod IP with 8000 byte packets without
    # fragmentation (-M do). The packets must be routed over the second nic
    # (eth1) which has MTU of 9000.
    result = cp_instance.exec(
        [
            "k8s",
            "kubectl",
            "exec",
            "netshoot",
            "--",
            "ping",
            "-M",
            "do",
            "-c",
            "50",
            "-s",
            "8000",
            f"{nginx_pod_ip}",
        ],
        capture_output=True,
    )

    # Sample output:
    # 50 packets transmitted, 40 received, 20% packet loss, time 9109ms
    ping_summary = result.stdout.decode()
    assert "50 packets transmitted" in ping_summary
    # The second comma-separated field is "<N> received"; extract N.
    packets_received = int(ping_summary.split(", ")[1].split(" ")[0])
    assert (
        packets_received >= 45
    ), "Expected at least 45 packets out of 50 to be received in running ping"


def get_pod_ip(instance: harness.Instance, pod_name, namespace="default"):
    """Return the cluster IP assigned to the given pod.

    Queries the pod object as JSON via kubectl and reads ``status.podIP``.
    """
    command = [
        "k8s",
        "kubectl",
        "get",
        "pod",
        pod_name,
        "-n",
        namespace,
        "-o",
        "json",
    ]
    proc = instance.exec(command, capture_output=True, text=True, check=True)
    return json.loads(proc.stdout)["status"]["podIP"]
7 changes: 7 additions & 0 deletions tests/integration/tests/test_util/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,13 @@
or (DIR / ".." / ".." / "lxd-dual-nic-profile.yaml").read_text()
)

# LXD_DUAL_NIC_JUMBO_PROFILE_NAME is the profile name to use for LXD containers
# with dual NIC setup with one of them being jumbo.
LXD_DUAL_NIC_JUMBO_PROFILE_NAME = (
os.getenv("TEST_LXD_DUAL_NIC_JUMBO_PROFILE_NAME")
or "k8s-integration-dual-nic-jumbo"
)

# LXD_IMAGE is the image to use for LXD containers.
LXD_IMAGE = os.getenv("TEST_LXD_IMAGE") or "ubuntu:22.04"

Expand Down
13 changes: 12 additions & 1 deletion tests/integration/tests/test_util/harness/lxd.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,14 @@ def __init__(self):
),
)

self.dual_nic_jumbo_profile = config.LXD_DUAL_NIC_JUMBO_PROFILE_NAME
self._configure_profile(
self.dual_nic_jumbo_profile,
config.LXD_DUAL_NIC_PROFILE.replace(
"LXD_DUAL_NIC_NETWORK", config.LXD_JUMBO_NETWORK
),
)

LOG.debug(
"Configured LXD substrate (profile %s, image %s)", self.profile, self.image
)
Expand All @@ -119,7 +127,7 @@ def new_instance(
self.profile,
]

valid_types = ["ipv4", "dualstack", "ipv6", "jumbo", "dualnic"]
valid_types = ["ipv4", "dualstack", "ipv6", "jumbo", "dualnic", "dualnic-jumbo"]
if network_type.lower() not in valid_types:
raise HarnessError(
f"unknown network type {network_type}, need to be one of {', '.join(valid_types)}"
Expand All @@ -141,6 +149,9 @@ def new_instance(
if network_type.lower() == "dualnic":
launch_lxd_command.extend(["-p", self.dual_nic_profile])

if network_type.lower() == "dualnic-jumbo":
launch_lxd_command.extend(["-p", self.dual_nic_jumbo_profile])

try:
stubbornly(retries=3, delay_s=1).exec(launch_lxd_command)
self.instances.add(instance_id)
Expand Down
26 changes: 26 additions & 0 deletions tests/integration/tests/test_util/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -552,6 +552,32 @@ def get_default_ip(instance: harness.Instance, ipv6=False):
return p.stdout.decode().split(" ")[8]


def get_device_ip(instance: harness.Instance, device: str, ipv6=False):
    """Return the global-scope IP address assigned to a specific device.

    Args:
        instance: harness instance on which to run ``ip``.
        device: network interface name, e.g. ``eth0``.
        ipv6: when True, look up the IPv6 address instead of IPv4.

    Raises:
        ValueError: if the device has no global-scope address of the
            requested family.
    """
    # The two address families differ only in the flag passed to `ip`,
    # so share a single code path instead of duplicating it.
    family_flag = "-6" if ipv6 else "-4"
    p = instance.exec(
        ["ip", "-json", family_flag, "addr", "show", "scope", "global", "dev", device],
        capture_output=True,
    )
    addr_json = json.loads(p.stdout.decode())
    if not addr_json or not addr_json[0].get("addr_info"):
        family = "IPv6" if ipv6 else "IPv4"
        raise ValueError(
            f"No {family} address found in the output of "
            f"'ip -json {family_flag} addr show scope global dev {device}'"
        )
    return addr_json[0]["addr_info"][0]["local"]


def get_global_unicast_ipv6(instance: harness.Instance, interface="eth0") -> str | None:
# ---
# 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
Expand Down
Loading