Skip to content

Commit 0174360

Browse files
test: Add testing for propagating the kubelet annotation on the NodeClaim (aws#7013)
1 parent c86e28e commit 0174360

File tree

12 files changed

+54
-20
lines changed

12 files changed

+54
-20
lines changed

charts/karpenter-crd/templates/karpenter.sh_nodeclaims.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
33
kind: CustomResourceDefinition
44
metadata:
55
annotations:
6-
controller-gen.kubebuilder.io/version: v0.16.2
6+
controller-gen.kubebuilder.io/version: v0.16.3
77
name: nodeclaims.karpenter.sh
88
spec:
99
group: karpenter.sh

charts/karpenter-crd/templates/karpenter.sh_nodepools.yaml

+3-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
33
kind: CustomResourceDefinition
44
metadata:
55
annotations:
6-
controller-gen.kubebuilder.io/version: v0.16.2
6+
controller-gen.kubebuilder.io/version: v0.16.3
77
name: nodepools.karpenter.sh
88
spec:
99
group: karpenter.sh
@@ -71,6 +71,8 @@ spec:
7171
from a combination of nodepool and pod scheduling constraints.
7272
properties:
7373
disruption:
74+
default:
75+
consolidateAfter: 0s
7476
description: Disruption contains the parameters that relate to Karpenter's disruption logic
7577
properties:
7678
budgets:

go.mod

+1-1
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ require (
3232
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
3333
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
3434
sigs.k8s.io/controller-runtime v0.18.4
35-
sigs.k8s.io/karpenter v1.0.1
35+
sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5
3636
sigs.k8s.io/yaml v1.4.0
3737
)
3838

go.sum

+2-2
Original file line numberDiff line numberDiff line change
@@ -761,8 +761,8 @@ sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHv
761761
sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
762762
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
763763
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
764-
sigs.k8s.io/karpenter v1.0.1 h1:IUx0DJ+NcdHyDnGhgbHtIlEzMIb38ElJOKmZ/oUpuqA=
765-
sigs.k8s.io/karpenter v1.0.1/go.mod h1:3NLmsnHHw8p4VutpjTOPUZyhE3qH6yGTs8O94Lsu8uw=
764+
sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5 h1:Z25Su8k1kfQN78arO9AKqoKrgyvo8DiO2l2I83gQ2aI=
765+
sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5/go.mod h1:3NLmsnHHw8p4VutpjTOPUZyhE3qH6yGTs8O94Lsu8uw=
766766
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
767767
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
768768
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=

pkg/apis/crds/karpenter.sh_nodeclaims.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
33
kind: CustomResourceDefinition
44
metadata:
55
annotations:
6-
controller-gen.kubebuilder.io/version: v0.16.2
6+
controller-gen.kubebuilder.io/version: v0.16.3
77
name: nodeclaims.karpenter.sh
88
spec:
99
group: karpenter.sh

pkg/apis/crds/karpenter.sh_nodepools.yaml

+3-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
33
kind: CustomResourceDefinition
44
metadata:
55
annotations:
6-
controller-gen.kubebuilder.io/version: v0.16.2
6+
controller-gen.kubebuilder.io/version: v0.16.3
77
name: nodepools.karpenter.sh
88
spec:
99
group: karpenter.sh
@@ -71,6 +71,8 @@ spec:
7171
from a combination of nodepool and pod scheduling constraints.
7272
properties:
7373
disruption:
74+
default:
75+
consolidateAfter: 0s
7476
description: Disruption contains the parameters that relate to Karpenter's disruption logic
7577
properties:
7678
budgets:

pkg/cloudprovider/cloudprovider.go

+1-5
Original file line numberDiff line numberDiff line change
@@ -94,11 +94,7 @@ func (c *CloudProvider) Create(ctx context.Context, nodeClaim *karpv1.NodeClaim)
9494
return nil, cloudprovider.NewNodeClassNotReadyError(fmt.Errorf("EC2NodeClass %q is incompatible with Karpenter v1, specify your Ubuntu AMIs in your AMISelectorTerms", nodeClass.Name))
9595
}
9696
// TODO: Remove this after v1
97-
nodePool, err := utils.ResolveNodePoolFromNodeClaim(ctx, c.kubeClient, nodeClaim)
98-
if err != nil {
99-
return nil, err
100-
}
101-
kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
97+
kubeletHash, err := utils.GetHashKubeletWithNodeClaim(nodeClaim, nodeClass)
10298
if err != nil {
10399
return nil, err
104100
}

pkg/cloudprovider/drift.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ func (c *CloudProvider) areStaticFieldsDrifted(nodeClaim *karpv1.NodeClaim, node
141141

142142
// Remove once v1beta1 is dropped
143143
func (c *CloudProvider) isKubeletConfigurationDrifted(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass, nodePool *karpv1.NodePool) (cloudprovider.DriftReason, error) {
144-
kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
144+
kubeletHash, err := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
145145
if err != nil {
146146
return "", err
147147
}

pkg/controllers/nodeclass/hash/controller.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1.EC2N
9999
if err != nil {
100100
return err
101101
}
102-
kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
102+
kubeletHash, err := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
103103
if err != nil {
104104
return err
105105
}

pkg/controllers/nodeclass/hash/suite_test.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
171171
},
172172
})
173173
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
174-
expectedHash, _ := utils.GetHashKubelet(nodePool, nodeClass)
174+
expectedHash, _ := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
175175

176176
ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
177177
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
@@ -201,7 +201,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
201201
PodsPerCore: lo.ToPtr(int32(9334283)),
202202
}
203203
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
204-
expectedHash, _ := utils.GetHashKubelet(nodePool, nodeClass)
204+
expectedHash, _ := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
205205

206206
ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
207207
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
@@ -435,7 +435,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
435435
PodsPerCore: lo.ToPtr(int32(9334283)),
436436
}
437437
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
438-
expectedHash, _ := utils.GetHashKubelet(nil, nodeClass)
438+
expectedHash, _ := utils.GetHashKubeletWithNodePool(nil, nodeClass)
439439

440440
ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
441441
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)

pkg/utils/utils.go

+13-1
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,19 @@ func parseKubeletConfiguration(annotation string) (*v1.KubeletConfiguration, err
142142
}, nil
143143
}
144144

145-
func GetHashKubelet(nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (string, error) {
145+
func GetHashKubeletWithNodeClaim(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) (string, error) {
146+
kubelet, err := GetKubeletConfigurationWithNodeClaim(nodeClaim, nodeClass)
147+
if err != nil {
148+
return "", err
149+
}
150+
return fmt.Sprint(lo.Must(hashstructure.Hash(kubelet, hashstructure.FormatV2, &hashstructure.HashOptions{
151+
SlicesAsSets: true,
152+
IgnoreZeroValue: true,
153+
ZeroNil: true,
154+
}))), nil
155+
}
156+
157+
func GetHashKubeletWithNodePool(nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (string, error) {
146158
kubelet, err := GetKubletConfigurationWithNodePool(nodePool, nodeClass)
147159
if err != nil {
148160
return "", err

test/suites/scheduling/suite_test.go

+24-2
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ limitations under the License.
1515
package scheduling_test
1616

1717
import (
18+
"encoding/json"
1819
"fmt"
1920
"testing"
2021
"time"
@@ -26,6 +27,7 @@ import (
2627
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2728
"k8s.io/apimachinery/pkg/labels"
2829
"k8s.io/apimachinery/pkg/util/sets"
30+
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
2931

3032
karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
3133
"sigs.k8s.io/karpenter/pkg/test"
@@ -99,6 +101,28 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
99101
env.ExpectCreatedNodeCount("==", 1)
100102
Expect(env.GetNode(pod.Spec.NodeName).Annotations).To(And(HaveKeyWithValue("foo", "bar"), HaveKeyWithValue(karpv1.DoNotDisruptAnnotationKey, "true")))
101103
})
104+
It("should ensure the NodePool and the NodeClaim use the same view of the v1beta1 kubelet configuration", func() {
105+
v1beta1NodePool := &v1beta1.NodePool{}
106+
Expect(nodePool.ConvertTo(env.Context, v1beta1NodePool)).To(Succeed())
107+
108+
// Allow a reasonable number of pods to schedule
109+
v1beta1NodePool.Spec.Template.Spec.Kubelet = &v1beta1.KubeletConfiguration{
110+
PodsPerCore: lo.ToPtr(int32(2)),
111+
}
112+
kubeletHash, err := json.Marshal(v1beta1NodePool.Spec.Template.Spec.Kubelet)
113+
Expect(err).ToNot(HaveOccurred())
114+
// Ensure that the v1 version of the NodeClass has a configuration that won't let pods schedule
115+
nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
116+
MaxPods: lo.ToPtr(int32(0)),
117+
}
118+
pod := test.Pod()
119+
env.ExpectCreated(nodeClass, v1beta1NodePool, pod)
120+
121+
nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
122+
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.KubeletCompatibilityAnnotationKey, string(kubeletHash)))
123+
env.EventuallyExpectCreatedNodeCount("==", 1)
124+
env.EventuallyExpectHealthy(pod)
125+
})
102126

103127
Context("Labels", func() {
104128
It("should support well-known labels for instance type selection", func() {
@@ -487,7 +511,6 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
487511
Expect(lo.FromPtr(env.GetInstance(pod.Spec.NodeName).InstanceType)).To(Equal("c5.large"))
488512
Expect(env.GetNode(pod.Spec.NodeName).Labels[karpv1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
489513
})
490-
491514
DescribeTable(
492515
"should provision a right-sized node when a pod has InitContainers (cpu)",
493516
func(expectedNodeCPU string, containerRequirements corev1.ResourceRequirements, initContainers ...corev1.Container) {
@@ -624,7 +647,6 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
624647
env.ExpectCreated(nodePool, nodeClass, pod)
625648
env.EventuallyExpectHealthy(pod)
626649
})
627-
628650
It("should provision a node for a pod with overlapping zone and zone-id requirements", func() {
629651
subnetInfo := lo.UniqBy(env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName}), func(s environmentaws.SubnetInfo) string {
630652
return s.Zone

0 commit comments

Comments (0)