@@ -106,7 +106,7 @@ def start_monitoring(self, kill_scenario: InputParams, pool: PodsMonitorPool):
106106 )
107107
108108
def get_pods(self, name_pattern, label_selector, namespace, kubecli: KrknKubernetes, field_selector: str = None, node_label_selector: str = None):
    """Select candidate pods by name pattern OR label selector, optionally
    restricted to pods scheduled on nodes matching *node_label_selector*.

    :param name_pattern: regex pattern matching pod names
        (mutually exclusive with label_selector)
    :param label_selector: kubernetes label selector for pods
        (mutually exclusive with name_pattern)
    :param namespace: namespace pattern to search in
    :param kubecli: KrknKubernetes client wrapper
    :param field_selector: optional kubernetes field selector,
        e.g. "status.phase=Running"
    :param node_label_selector: optional node label selector; when set, only
        pods running on a node carrying that label are returned
    :return: list of (pod_name, namespace) tuples (empty on selection errors)
    """
    # Initialize so the error branches below cannot leave `pods` unbound
    # (previously a NameError when both or neither selector was given).
    pods = []
    if label_selector and name_pattern:
        logging.error('Only one of name pattern or label pattern can be specified')
    elif label_selector:
        # NOTE(review): this branch sat between diff hunks and was
        # reconstructed — confirm the call signature against krkn-lib.
        pods = kubecli.select_pods_by_label(label_selector=label_selector, field_selector=field_selector)
    elif name_pattern:
        pods = kubecli.select_pods_by_name_pattern_and_namespace_pattern(pod_name_pattern=name_pattern, namespace_pattern=namespace, field_selector=field_selector)
    else:
        logging.error('Name pattern or label pattern must be specified ')

    # Optionally keep only pods scheduled on nodes that carry the node label.
    if node_label_selector and pods:
        # Set for O(1) membership tests in the per-pod loop below.
        nodes_with_label = set(kubecli.list_nodes(label_selector=node_label_selector))
        if not nodes_with_label:
            # No node matches the label: no pod can qualify.
            return []

        filtered_pods = []
        for pod_name, pod_namespace in pods:
            # One read_pod API call per candidate pod — acceptable for the
            # small candidate sets this scenario operates on.
            pod_node_name = kubecli.read_pod(pod_name, pod_namespace).spec.node_name
            # node_name may be unset for pods not yet scheduled.
            if pod_node_name and pod_node_name in nodes_with_label:
                filtered_pods.append((pod_name, pod_namespace))
        return filtered_pods

    return pods
119135
def killing_pods(self, config: InputParams, kubecli: KrknKubernetes):
    """Delete ``config.kill`` randomly selected running pods matching the
    configured name/label/namespace (and optional node label) criteria, then
    wait for the pod count to recover.

    :param config: scenario input parameters
    :param kubecli: KrknKubernetes client wrapper
    :return: 0 on success, 1 on invalid input or insufficient matching pods
    """
    # NOTE(review): the first statements of this method fell between diff
    # hunks; the namespace extraction below is reconstructed from the
    # `if not namespace` check — confirm against the full source.
    namespace = config.namespace_pattern
    if not namespace:
        logging.error('Namespace pattern must be specified')
        # Previously execution fell through and queried with no namespace.
        return 1

    # Only running pods are candidates; optionally restricted by node label.
    pods = self.get_pods(
        config.name_pattern,
        config.label_selector,
        config.namespace_pattern,
        kubecli,
        field_selector="status.phase=Running",
        node_label_selector=config.node_label_selector,
    )
    pods_count = len(pods)
    if pods_count < config.kill:
        logging.error("Not enough pods match the criteria, expected {} but found only {} pods".format(
            config.kill, pods_count))
        # Without this early return the kill loop below would raise
        # IndexError once it ran past the available pods.
        return 1

    # Randomize which of the matching pods get killed.
    random.shuffle(pods)
    for pod in pods[:config.kill]:
        logging.info(pod)
        logging.info(f'Deleting pod {pod[0]}')
        kubecli.delete_pod(pod[0], pod[1])

    self.wait_for_pods(config.label_selector, config.name_pattern, config.namespace_pattern, pods_count, config.duration, config.timeout, kubecli, config.node_label_selector)
    return 0
144159
145160 def wait_for_pods (
146- self , label_selector , pod_name , namespace , pod_count , duration , wait_timeout , kubecli : KrknKubernetes
161+ self , label_selector , pod_name , namespace , pod_count , duration , wait_timeout , kubecli : KrknKubernetes , node_label_selector
147162 ):
148163 timeout = False
149164 start_time = datetime .now ()
150165
151166 while not timeout :
152- pods = self .get_pods (name_pattern = pod_name , label_selector = label_selector ,namespace = namespace , field_selector = "status.phase=Running" , kubecli = kubecli )
167+ pods = self .get_pods (name_pattern = pod_name , label_selector = label_selector ,namespace = namespace , field_selector = "status.phase=Running" , kubecli = kubecli , node_label_selector = node_label_selector )
153168 if pod_count == len (pods ):
154169 return
155170
0 commit comments