
Commit 8c5cd27

Adding node_label_selector for pod scenarios
Signed-off-by: Sahil Shah <[email protected]>
1 parent 90c52f9 commit 8c5cd27

File tree: 9 files changed, +33 −9 lines changed

krkn/scenario_plugins/pod_disruption/models/models.py

Lines changed: 3 additions & 1 deletion
@@ -11,11 +11,13 @@ def __init__(self, config: dict[str,any] = None):
         self.label_selector = config["label_selector"] if "label_selector" in config else ""
         self.namespace_pattern = config["namespace_pattern"] if "namespace_pattern" in config else ""
         self.name_pattern = config["name_pattern"] if "name_pattern" in config else ""
+        self.node_label_selector = config["node_label_selector"] if "node_label_selector" in config else ""
 
     namespace_pattern: str
     krkn_pod_recovery_time: int
     timeout: int
     duration: int
     kill: int
     label_selector: str
-    name_pattern: str
+    name_pattern: str
+    node_label_selector: str
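
The new field is read the same way as the existing ones: if the key is present in the scenario config dict it is stored on the object, otherwise it falls back to an empty string. A minimal sketch of the assumed usage, with illustrative config values (only keys handled in the __init__ above are shown):

    # Sketch only: build InputParams from a scenario config dict (values are illustrative).
    from krkn.scenario_plugins.pod_disruption.models.models import InputParams

    params = InputParams({
        "namespace_pattern": "^openshift-monitoring$",
        "label_selector": "statefulset.kubernetes.io/pod-name=prometheus-k8s-0",
        "node_label_selector": "node-role.kubernetes.io/worker=",  # new field from this commit
    })
    print(params.node_label_selector)            # "node-role.kubernetes.io/worker="
    print(InputParams({}).node_label_selector)   # "" (missing key falls back to empty string)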

krkn/scenario_plugins/pod_disruption/pod_disruption_scenario_plugin.py

Lines changed: 21 additions & 6 deletions
@@ -106,7 +106,7 @@ def start_monitoring(self, kill_scenario: InputParams, pool: PodsMonitorPool):
         )
 
 
-    def get_pods(self, name_pattern, label_selector,namespace, kubecli: KrknKubernetes, field_selector: str =None):
+    def get_pods(self, name_pattern, label_selector,namespace, kubecli: KrknKubernetes, field_selector: str =None, node_label_selector: str = None):
         if label_selector and name_pattern:
             logging.error('Only, one of name pattern or label pattern can be specified')
         elif label_selector:
@@ -115,6 +115,22 @@ def get_pods(self, name_pattern, label_selector,namespace, kubecli: KrknKubernet
             pods = kubecli.select_pods_by_name_pattern_and_namespace_pattern(pod_name_pattern=name_pattern, namespace_pattern=namespace, field_selector=field_selector)
         else:
             logging.error('Name pattern or label pattern must be specified ')
+
+        # Filter pods by node label selector if specified
+        if node_label_selector and pods:
+            filtered_pods = []
+            nodes_with_label = kubecli.list_nodes(label_selector=node_label_selector)
+            if not nodes_with_label:
+                return []
+
+            for pod_name, pod_namespace in pods:
+                pod_info = kubecli.read_pod(pod_name, pod_namespace)
+                pod_node_name = pod_info.spec.node_name
+                if pod_node_name and pod_node_name in nodes_with_label:
+                    filtered_pods.append((pod_name, pod_namespace))
+
+            return filtered_pods
+
         return pods
 
     def killing_pods(self, config: InputParams, kubecli: KrknKubernetes):
@@ -124,7 +140,7 @@ def killing_pods(self, config: InputParams, kubecli: KrknKubernetes):
         if not namespace:
             logging.error('Namespace pattern must be specified')
 
-        pods = self.get_pods(config.name_pattern,config.label_selector,config.namespace_pattern, kubecli, field_selector="status.phase=Running")
+        pods = self.get_pods(config.name_pattern,config.label_selector,config.namespace_pattern, kubecli, field_selector="status.phase=Running", node_label_selector=config.node_label_selector)
         pods_count = len(pods)
         if len(pods) < config.kill:
             logging.error("Not enough pods match the criteria, expected {} but found only {} pods".format(
@@ -133,23 +149,22 @@ def killing_pods(self, config: InputParams, kubecli: KrknKubernetes):
 
         random.shuffle(pods)
         for i in range(config.kill):
-
             pod = pods[i]
             logging.info(pod)
             logging.info(f'Deleting pod {pod[0]}')
             kubecli.delete_pod(pod[0], pod[1])
 
-        self.wait_for_pods(config.label_selector,config.name_pattern,config.namespace_pattern, pods_count, config.duration, config.timeout, kubecli)
+        self.wait_for_pods(config.label_selector,config.name_pattern,config.namespace_pattern, pods_count, config.duration, config.timeout, kubecli, config.node_label_selector)
         return 0
 
     def wait_for_pods(
-            self, label_selector, pod_name, namespace, pod_count, duration, wait_timeout, kubecli: KrknKubernetes
+            self, label_selector, pod_name, namespace, pod_count, duration, wait_timeout, kubecli: KrknKubernetes, node_label_selector
     ):
         timeout = False
         start_time = datetime.now()
 
         while not timeout:
-            pods = self.get_pods(name_pattern=pod_name, label_selector=label_selector,namespace=namespace, field_selector="status.phase=Running", kubecli=kubecli)
+            pods = self.get_pods(name_pattern=pod_name, label_selector=label_selector,namespace=namespace, field_selector="status.phase=Running", kubecli=kubecli, node_label_selector=node_label_selector)
             if pod_count == len(pods):
                 return
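
The filter first resolves the nodes matching node_label_selector, then keeps only pods scheduled onto one of them; if no node matches, an empty list is returned. As a rough sketch of the same idea using the upstream kubernetes Python client rather than the KrknKubernetes wrapper used above (the function name and flow here are illustrative assumptions, not krkn code):

    # Illustrative sketch, not krkn code: running pods restricted to nodes matching a label selector.
    from kubernetes import client, config

    def running_pods_on_labeled_nodes(namespace, node_label_selector):
        config.load_kube_config()
        v1 = client.CoreV1Api()
        # Names of nodes carrying the requested label.
        node_names = {n.metadata.name for n in v1.list_node(label_selector=node_label_selector).items}
        if not node_names:
            return []
        # Keep only running pods scheduled onto one of those nodes.
        pods = v1.list_namespaced_pod(namespace, field_selector="status.phase=Running").items
        return [(p.metadata.name, p.metadata.namespace) for p in pods if p.spec.node_name in node_names]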

scenarios/kind/pod_etcd.yml

Lines changed: 1 addition & 0 deletions
@@ -2,4 +2,5 @@
   config:
     namespace_pattern: "kube-system"
     label_selector: "component=etcd"
+    node_label_selector: node-role.kubernetes.io/control-plane= # Target control-plane nodes (kind clusters)
     krkn_pod_recovery_time: 120

scenarios/kube/pod.yml

Lines changed: 1 addition & 0 deletions
@@ -5,3 +5,4 @@
     namespace_pattern: ^default$
     kill: 1
     krkn_pod_recovery_time: 120
+    node_label_selector: node-role.kubernetes.io/worker= # Target worker nodes only

scenarios/openshift/customapp_pod.yaml

Lines changed: 2 additions & 1 deletion
@@ -3,4 +3,5 @@
   config:
     namespace_pattern: ^acme-air$
     name_pattern: .*
-    krkn_pod_recovery_time: 120
+    krkn_pod_recovery_time: 120
+    node_label_selector: node-role.kubernetes.io/master= # Target master nodes only

scenarios/openshift/openshift-apiserver.yml

Lines changed: 1 addition & 0 deletions
@@ -3,5 +3,6 @@
   config:
     namespace_pattern: ^openshift-apiserver$
     label_selector: app=openshift-apiserver-a
+    node_label_selector: node-role.kubernetes.io/master= # Target master nodes only
     krkn_pod_recovery_time: 120

scenarios/openshift/openshift-kube-apiserver.yml

Lines changed: 1 addition & 0 deletions
@@ -3,5 +3,6 @@
   config:
     namespace_pattern: ^openshift-kube-apiserver$
     label_selector: app=openshift-kube-apiserver
+    node_label_selector: node-role.kubernetes.io/master= # Target master nodes only
     krkn_pod_recovery_time: 120

scenarios/openshift/prom_kill.yml

Lines changed: 2 additions & 1 deletion
@@ -2,4 +2,5 @@
   config:
     namespace_pattern: ^openshift-monitoring$
     label_selector: statefulset.kubernetes.io/pod-name=prometheus-k8s-0
-    krkn_pod_recovery_time: 120
+    krkn_pod_recovery_time: 120
+    node_label_selector: node-role.kubernetes.io/worker= # Target worker nodes only

scenarios/openshift/regex_openshift_pod_kill.yml

Lines changed: 1 addition & 0 deletions
@@ -5,3 +5,4 @@
     name_pattern: .*
     kill: 3
     krkn_pod_recovery_time: 120
+    node_label_selector: node-role.kubernetes.io/worker=
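
All of these scenario values use the key= form of a label selector, which matches nodes whose role label is present with an empty value (the usual shape of node-role.kubernetes.io/* labels). A quick, assumed pre-flight check with the upstream kubernetes client to confirm a selector matches at least one node before running a scenario:

    # Assumed sanity check, not krkn code: list nodes matching a node_label_selector value.
    from kubernetes import client, config

    config.load_kube_config()
    selector = "node-role.kubernetes.io/worker="
    nodes = client.CoreV1Api().list_node(label_selector=selector).items
    print(f"{len(nodes)} node(s) match {selector!r}:", [n.metadata.name for n in nodes])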
