@@ -2,7 +2,6 @@ package e2e
 
 import (
 	"context"
-	"fmt"
 	"os"
 	"sort"
 	"testing"
@@ -16,93 +15,56 @@ import (
1615 "k8s.io/client-go/tools/clientcmd"
1716)
1817
-func TestCapcityCheck(t *testing.T) {
-	path := os.Getenv("KIND_KUBECONFIG")
-	cfg, err := clientcmd.BuildConfigFromFlags("", path)
-	require.NoError(t, err)
-	client, err := kubernetes.NewForConfig(cfg)
-	require.NoError(t, err)
-
-	require.Never(t, func() bool {
-		nodeList, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: "xkf.xenit.io/node-ttl"})
-		require.NoError(t, err)
-		for _, node := range nodeList.Items {
-			t.Log("checking that node is not evicted", node.Name)
-			// TODO: There should be a better way to check that eviction is due to node ttl
-			return node.Spec.Unschedulable
-		}
-		return false
-	}, 1*time.Minute, 5*time.Second)
-}
-
 func TestTTLEviction(t *testing.T) {
 	path := os.Getenv("KIND_KUBECONFIG")
 	cfg, err := clientcmd.BuildConfigFromFlags("", path)
 	require.NoError(t, err)
 	client, err := kubernetes.NewForConfig(cfg)
 	require.NoError(t, err)
 
-	nodeList, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: "xkf.xenit.io/node-ttl"})
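+	// Restrict the listing to the asg1 node group so that only that pool of
+	// TTL-labeled nodes is checked by this test.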
+	nodeList, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: "xkf.xenit.io/node-ttl,autoscaling.k8s.io/nodegroup=asg1"})
 	require.NoError(t, err)
 	nodes := nodeList.Items
 	sort.SliceStable(nodes, func(i, j int) bool {
 		return nodes[j].CreationTimestamp.After(nodes[i].CreationTimestamp.Time)
 	})
 
-	nodeNames := []string{}
-	for _, node := range nodes {
-		nodeNames = append(nodeNames, node.Name)
-	}
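+	// Remember the initial set of TTL-labeled nodes by name; the test passes
+	// once every one of them has disappeared from the cluster.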
+	nodeMap := getNodesMap(nodes)
+	nodeNames := getNodeKeys(nodeMap)
 	t.Log("checking eviction of nodes", nodeNames)
 
-	for _, node := range nodeList.Items {
-		t.Log("waiting for node to be evicted due to TTL", node.Name)
-
-		require.Eventually(t, func() bool {
-			getNode, err := client.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
-			require.NoError(t, err)
-			// TODO: There should be a better way to check that eviction is due to node ttl
-			if !getNode.Spec.Unschedulable {
-				return false
-			}
-			return true
-		}, 2*time.Minute, 1*time.Second, "node should be evicted due to TTL")
-		t.Log("node has been marked unschedulable by node ttl", node.Name)
-
-		require.Eventually(t, func() bool {
-			podList, err := client.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s", node.Name)})
-			require.NoError(t, err)
-			pods := testFilterDaemonset(podList.Items)
-			if len(pods) != 0 {
-				return false
+	// Verify that all of the original nodes are eventually deleted, and
+	// therefore replaced by new ones.
+	require.Eventually(t, func() bool {
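+		// It is safe to delete entries from nodeMap while ranging over it:
+		// Go guarantees a removed entry is not produced later in the loop.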
+		for _, name := range nodeMap {
+			_, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
+			if apierrors.IsNotFound(err) {
+				delete(nodeMap, name)
+				t.Logf("node %s doesn't exist anymore, continuing with next one", name)
+				continue
 			}
-			return true
-		}, 30*time.Second, 1*time.Second, "node should be drained")
-		t.Log("node has been drained", node.Name)
+		}
+		return len(nodeMap) == 0
+	}, 5*time.Minute, 5*time.Second, "all nodes should have been evicted and replaced by new nodes")
 
-	// TODO: Make sure only one node is beeing evicted at once
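+	// List the node group again so the log captures the replacement nodes
+	// created after the originals were evicted.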
+	nodeList, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: "xkf.xenit.io/node-ttl,autoscaling.k8s.io/nodegroup=asg1"})
+	require.NoError(t, err)
+	nodeMap = getNodesMap(nodeList.Items)
+	nodeNames = getNodeKeys(nodeMap)
+	t.Log("nodes after all nodes have been evicted", nodeNames)
+}
 
-		require.Eventually(t, func() bool {
-			_, err := client.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
-			if !apierrors.IsNotFound(err) {
-				return false
-			}
-			return true
-		}, 2*time.Minute, 1*time.Second, "node should be delted")
-		t.Log("underutilized node has been deleted", node.Name)
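+// getNodesMap indexes the given nodes by name, so entries can be deleted in
+// O(1) as each node disappears during polling.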
+func getNodesMap(nodes []corev1.Node) map[string]string {
+	nodeNames := make(map[string]string)
+	for _, node := range nodes {
+		nodeNames[node.Name] = node.Name
 	}
+	return nodeNames
 }
 
-func testFilterDaemonset(pods []corev1.Pod) []corev1.Pod {
-	filteredPods := []corev1.Pod{}
-OUTER:
-	for _, pod := range pods {
-		for _, ownerRef := range pod.OwnerReferences {
-			if ownerRef.APIVersion == "apps/v1" && ownerRef.Kind == "DaemonSet" {
-				continue OUTER
-			}
-		}
-		filteredPods = append(filteredPods, pod)
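+// getNodeKeys returns the names stored in the map as a slice, mainly for
+// readable log output.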
+func getNodeKeys(m map[string]string) []string {
+	nodeKeys := []string{}
+	for _, node := range m {
+		nodeKeys = append(nodeKeys, node)
 	}
-	return filteredPods
+	return nodeKeys
 }