Skip to content

Commit 04760e2

Browse files
authored
VirtualCapacity now updated in MachineClass's NodeTemplate without change in MachineClass name (#1545)
* virtual capacity now set in node template without hash change * updated usage * ran make generate * tidied and added doc comment * new hash only for k8s version check > 1.33.4 * reverted to master * revised ComputeAdditionalHashDataV2 for NodeTemplate.VirtualCapacity * regenerated using make generate * enhanced and added comments to VirtualCapacityChanged test
1 parent a5317da commit 04760e2

3 files changed

Lines changed: 161 additions & 18 deletions

File tree

docs/usage/usage.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -571,6 +571,8 @@ spec:
571571
memory: 50Gi # inherited from pool's machine type if un-specified
572572
ephemeral-storage: 10Gi # override to specify explicit ephemeral-storage for scale from zero
573573
resource.com/dongle: 4 # Example of a custom, extended resource.
574+
virtualCapacity:
575+
subdomain.domain.com/resource-name: 1234567 # should hot update node capacity without rollout
574576
```
575577

576578
The `.volume.iops` is the number of I/O operations per second (IOPS) that the volume supports.

pkg/controller/worker/machines.go

Lines changed: 34 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ package worker
66

77
import (
88
"context"
9+
"encoding/json"
910
"fmt"
1011
"maps"
1112
"path/filepath"
@@ -104,7 +105,7 @@ func (w *WorkerDelegate) generateMachineConfig(ctx context.Context) error {
104105
}
105106
}
106107

107-
workerPoolHash, err := w.generateWorkerPoolHash(pool)
108+
workerPoolHash, err := w.generateWorkerPoolHash(pool, workerConfig)
108109
if err != nil {
109110
return err
110111
}
@@ -194,16 +195,21 @@ func (w *WorkerDelegate) generateMachineConfig(ctx context.Context) error {
194195
var nodeTemplate machinev1alpha1.NodeTemplate
195196
if pool.NodeTemplate != nil {
196197
nodeTemplate = machinev1alpha1.NodeTemplate{
197-
Capacity: pool.NodeTemplate.Capacity,
198-
InstanceType: pool.MachineType,
199-
Region: w.worker.Spec.Region,
200-
Zone: zone,
201-
Architecture: &arch,
198+
Capacity: pool.NodeTemplate.Capacity,
199+
VirtualCapacity: pool.NodeTemplate.VirtualCapacity,
200+
InstanceType: pool.MachineType,
201+
Region: w.worker.Spec.Region,
202+
Zone: zone,
203+
Architecture: &arch,
202204
}
203205
}
204206
if workerConfig.NodeTemplate != nil {
205-
// Support providerConfig extended resources by copying into node template capacity
207+
// Support providerConfig extended resources by copying into node template capacity and virtualCapacity
206208
maps.Copy(nodeTemplate.Capacity, workerConfig.NodeTemplate.Capacity)
209+
if nodeTemplate.VirtualCapacity == nil {
210+
nodeTemplate.VirtualCapacity = corev1.ResourceList{}
211+
}
212+
maps.Copy(nodeTemplate.VirtualCapacity, workerConfig.NodeTemplate.VirtualCapacity)
207213
}
208214
machineClassSpec["nodeTemplate"] = nodeTemplate
209215

@@ -360,8 +366,12 @@ func (w *WorkerDelegate) computeBlockDevices(pool extensionsv1alpha1.WorkerPool,
360366
return blockDevices, nil
361367
}
362368

363-
func (w *WorkerDelegate) generateWorkerPoolHash(pool extensionsv1alpha1.WorkerPool) (string, error) {
364-
return worker.WorkerPoolHash(pool, w.cluster, ComputeAdditionalHashDataV1(pool), ComputeAdditionalHashDataV2(pool), ComputeAdditionalHashDataInPlace(pool))
369+
func (w *WorkerDelegate) generateWorkerPoolHash(pool extensionsv1alpha1.WorkerPool, workerConfig *awsapi.WorkerConfig) (string, error) {
370+
v2HashData, err := ComputeAdditionalHashDataV2(pool, workerConfig)
371+
if err != nil {
372+
return "", err
373+
}
374+
return worker.WorkerPoolHash(pool, w.cluster, ComputeAdditionalHashDataV1(pool), v2HashData, ComputeAdditionalHashDataInPlace(pool))
365375
}
366376

367377
func computeEBSForVolume(volume extensionsv1alpha1.Volume) (map[string]interface{}, error) {
@@ -435,16 +445,29 @@ func ComputeAdditionalHashDataV1(pool extensionsv1alpha1.WorkerPool) []string {
435445

436446
// ComputeAdditionalHashDataV2 computes additional hash data for the worker pool. It returns a slice of strings containing the
437447
// additional data used for hashing.
438-
func ComputeAdditionalHashDataV2(pool extensionsv1alpha1.WorkerPool) []string {
448+
func ComputeAdditionalHashDataV2(pool extensionsv1alpha1.WorkerPool, workerConfig *awsapi.WorkerConfig) ([]string, error) {
439449
var additionalData = ComputeAdditionalHashDataV1(pool)
440450

451+
if workerConfig != nil && workerConfig.NodeTemplate != nil && workerConfig.NodeTemplate.VirtualCapacity != nil {
452+
// Addition or Change in VirtualCapacity should NOT cause existing hash to change to prevent trigger of rollout.
453+
// TODO: once the MCM supports Machine Hot-Update from the WorkerConfig, this hash data logic can be made smarter
454+
workerConfigCopy := workerConfig.DeepCopy()
455+
workerConfigCopy.NodeTemplate.VirtualCapacity = nil
456+
data, err := json.Marshal(workerConfigCopy)
457+
if err != nil {
458+
return nil, err
459+
}
460+
additionalData = append(additionalData, string(data))
461+
return additionalData, nil
462+
}
463+
441464
// in the future, we may not calculate a hash for the whole ProviderConfig
442465
// for example volume IOPS changes could be done in place
443466
if pool.ProviderConfig != nil && pool.ProviderConfig.Raw != nil {
444467
additionalData = append(additionalData, string(pool.ProviderConfig.Raw))
445468
}
446469

447-
return additionalData
470+
return additionalData, nil
448471
}
449472

450473
// ComputeAdditionalHashDataInPlace computes additional hash data for a worker pool with in-place update strategy.

pkg/controller/worker/machines_test.go

Lines changed: 125 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ import (
3434
"k8s.io/apimachinery/pkg/runtime"
3535
"k8s.io/apimachinery/pkg/runtime/serializer"
3636
"k8s.io/apimachinery/pkg/util/intstr"
37+
"k8s.io/apimachinery/pkg/util/sets"
3738
"k8s.io/utils/ptr"
3839
"sigs.k8s.io/controller-runtime/pkg/client"
3940

@@ -1149,24 +1150,30 @@ var _ = Describe("Machines", func() {
11491150
Expect(err).To(HaveOccurred())
11501151
})
11511152

1152-
It("should return generate machine classes with core and extended resources in the nodeTemplate", func() {
1153+
It("should return generate machine classes with core, extended and virtual resources in the nodeTemplate", func() {
11531154
ephemeralStorageQuant := resource.MustParse("30Gi")
11541155
dongleName := corev1.ResourceName("resources.com/dongle")
11551156
dongleQuant := resource.MustParse("4")
1157+
virtualResourceName := corev1.ResourceName("subdomain.domain.com/virtual-resource-name")
1158+
virtualResourceQuant := resource.MustParse("1024")
11561159
customResources := corev1.ResourceList{
11571160
corev1.ResourceEphemeralStorage: ephemeralStorageQuant,
11581161
dongleName: dongleQuant,
11591162
}
1163+
customVirtualResources := corev1.ResourceList{
1164+
virtualResourceName: virtualResourceQuant,
1165+
}
1166+
11601167
w.Spec.Pools[0].ProviderConfig = &runtime.RawExtension{
11611168
Raw: encode(&api.WorkerConfig{
11621169
NodeTemplate: &extensionsv1alpha1.NodeTemplate{
1163-
Capacity: customResources,
1170+
Capacity: customResources,
1171+
VirtualCapacity: customVirtualResources,
11641172
},
11651173
}),
11661174
}
1167-
1168-
expectedCapacity := w.Spec.Pools[0].NodeTemplate.Capacity.DeepCopy()
1169-
maps.Copy(expectedCapacity, customResources)
1175+
expectedNodeTemplateCapacity := w.Spec.Pools[0].NodeTemplate.Capacity.DeepCopy()
1176+
maps.Copy(expectedNodeTemplateCapacity, customResources)
11701177

11711178
wd, err := NewWorkerDelegate(c, decoder, scheme, chartApplier, "", w, cluster)
11721179
Expect(err).NotTo(HaveOccurred())
@@ -1178,12 +1185,123 @@ var _ = Describe("Machines", func() {
11781185
for _, mClz := range mClasses {
11791186
className := mClz["name"].(string)
11801187
if strings.Contains(className, namePool1) {
1188+
GinkgoWriter.Printf("Machine class name: %q\n", className)
11811189
nt := mClz["nodeTemplate"].(machinev1alpha1.NodeTemplate)
1182-
Expect(nt.Capacity).To(Equal(expectedCapacity))
1190+
Expect(nt.Capacity).To(Equal(expectedNodeTemplateCapacity))
1191+
Expect(nt.VirtualCapacity).To(Equal(customVirtualResources))
11831192
}
11841193
}
11851194
})
11861195
})
1196+
It("should generate machine classes with same name even when virtualCapacity is newly added or changed", Label("machineClass", "virtualCapacity"), func() {
1197+
capacityResources := corev1.ResourceList{
1198+
corev1.ResourceCPU: resource.MustParse("1"),
1199+
corev1.ResourceMemory: resource.MustParse("1Gi"),
1200+
corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
1201+
}
1202+
w1 := w.DeepCopy()
1203+
w1.Spec.Pools[0].NodeAgentSecretName = ptr.To("dummy") // To Ensure that WorkerPoolHashV2 is used
1204+
1205+
// First, we specify a ProviderConfig with Capacity and no VirtualCapacity.
1206+
w1.Spec.Pools[0].ProviderConfig = &runtime.RawExtension{
1207+
Raw: encode(&api.WorkerConfig{
1208+
NodeTemplate: &extensionsv1alpha1.NodeTemplate{
1209+
Capacity: capacityResources,
1210+
},
1211+
}),
1212+
}
1213+
expectedNodeTemplateCapacity := w.Spec.Pools[0].NodeTemplate.Capacity.DeepCopy()
1214+
maps.Copy(expectedNodeTemplateCapacity, capacityResources)
1215+
1216+
wd1, err := NewWorkerDelegate(c, decoder, scheme, chartApplier, "", w1, cluster)
1217+
Expect(err).NotTo(HaveOccurred())
1218+
expectedUserDataSecretRefRead()
1219+
_, err = wd1.GenerateMachineDeployments(ctx)
1220+
Expect(err).NotTo(HaveOccurred())
1221+
workerDelegate1 := wd1.(*WorkerDelegate)
1222+
mClasses1 := workerDelegate1.GetMachineClasses()
1223+
classNames1 := sets.New[string]() // holds machine classes names generated with Capacity and no VirtualCapacity
1224+
for _, mClz := range mClasses1 {
1225+
className := mClz["name"].(string)
1226+
if strings.Contains(className, namePool1) {
1227+
nt := mClz["nodeTemplate"].(machinev1alpha1.NodeTemplate)
1228+
GinkgoWriter.Printf("WithOnlyCapacity: MachineClassName:%q,Capacity:%v\n", className, nt.Capacity)
1229+
classNames1.Insert(className)
1230+
Expect(nt.Capacity).To(Equal(expectedNodeTemplateCapacity))
1231+
}
1232+
}
1233+
1234+
GinkgoWriter.Println("Regenerate MachineClasses with new VirtualCapacity")
1235+
virtualResourceName := corev1.ResourceName("subdomain.domain.com/virtual-resource-name")
1236+
virtualResourceQuant1 := resource.MustParse("1024")
1237+
virtualCapacityResources1 := corev1.ResourceList{
1238+
virtualResourceName: virtualResourceQuant1,
1239+
}
1240+
w2 := w.DeepCopy()
1241+
w2.Spec.Pools[0].NodeAgentSecretName = ptr.To("dummy") // To Ensure that WorkerPoolHashV2 is used
1242+
w2.Spec.Pools[0].ProviderConfig = &runtime.RawExtension{
1243+
Raw: encode(&api.WorkerConfig{
1244+
NodeTemplate: &extensionsv1alpha1.NodeTemplate{
1245+
Capacity: capacityResources,
1246+
VirtualCapacity: virtualCapacityResources1, // We now additionally set the VirtualCapacity
1247+
},
1248+
}),
1249+
}
1250+
wd2, err := NewWorkerDelegate(c, decoder, scheme, chartApplier, "", w2, cluster)
1251+
Expect(err).NotTo(HaveOccurred())
1252+
_, err = wd2.GenerateMachineDeployments(ctx)
1253+
Expect(err).NotTo(HaveOccurred())
1254+
workerDelegate2 := wd2.(*WorkerDelegate)
1255+
mClasses2 := workerDelegate2.GetMachineClasses()
1256+
classNames2 := sets.New[string]() // holds machine classes names generated with both Capacity and new VirtualCapacity.
1257+
for _, mClz := range mClasses2 {
1258+
className := mClz["name"].(string)
1259+
if strings.Contains(className, namePool1) {
1260+
nt := mClz["nodeTemplate"].(machinev1alpha1.NodeTemplate)
1261+
GinkgoWriter.Printf("WithAdditionOfVirtualCapacity: MachineClassName:%q,Capacity:%v,VirtualCapacity:%v\n", className, nt.Capacity, nt.VirtualCapacity)
1262+
Expect(nt.Capacity).To(Equal(expectedNodeTemplateCapacity))
1263+
Expect(nt.VirtualCapacity).To(Equal(virtualCapacityResources1))
1264+
classNames2.Insert(className)
1265+
}
1266+
}
1267+
Expect(classNames1).To(Equal(classNames2))
1268+
1269+
GinkgoWriter.Println("Regenerate MachineClasses with change in VirtualCapacity")
1270+
virtualResourceQuant2 := resource.MustParse("2048")
1271+
virtualCapacityResources2 := corev1.ResourceList{
1272+
virtualResourceName: virtualResourceQuant2,
1273+
}
1274+
w3 := w.DeepCopy()
1275+
w3.Spec.Pools[0].NodeAgentSecretName = ptr.To("dummy") // To Ensure that WorkerPoolHashV2 is used
1276+
w3.Spec.Pools[0].ProviderConfig = &runtime.RawExtension{
1277+
Raw: encode(&api.WorkerConfig{
1278+
NodeTemplate: &extensionsv1alpha1.NodeTemplate{
1279+
Capacity: capacityResources,
1280+
VirtualCapacity: virtualCapacityResources2, // We now change the VirtualCapacity
1281+
},
1282+
}),
1283+
}
1284+
wd3, err := NewWorkerDelegate(c, decoder, scheme, chartApplier, "", w3, cluster)
1285+
Expect(err).NotTo(HaveOccurred())
1286+
_, err = wd3.GenerateMachineDeployments(ctx)
1287+
Expect(err).NotTo(HaveOccurred())
1288+
workerDelegate3 := wd3.(*WorkerDelegate)
1289+
mClasses3 := workerDelegate3.GetMachineClasses()
1290+
classNames3 := sets.New[string]() // holds machine classes names generated with both Capacity and VirtualCapacity changed
1291+
for _, mClz := range mClasses3 {
1292+
className := mClz["name"].(string)
1293+
if strings.Contains(className, namePool1) {
1294+
nt := mClz["nodeTemplate"].(machinev1alpha1.NodeTemplate)
1295+
GinkgoWriter.Printf("WithChangeOfVirtualCapacity: MachineClassName:%q,Capacity:%v,VirtualCapacity:%v\n", className, nt.Capacity, nt.VirtualCapacity)
1296+
Expect(nt.Capacity).To(Equal(expectedNodeTemplateCapacity))
1297+
Expect(nt.VirtualCapacity).To(Equal(virtualCapacityResources2))
1298+
classNames3.Insert(className)
1299+
}
1300+
}
1301+
// classNames with change in VirtualCapacity should be unchanged
1302+
Expect(classNames3).To(Equal(classNames2))
1303+
Expect(classNames3).To(Equal(classNames1))
1304+
})
11871305

11881306
It("should fail because the version is invalid", func() {
11891307
clusterWithoutImages.Shoot.Spec.Kubernetes.Version = "invalid"
@@ -1441,7 +1559,7 @@ var _ = Describe("Machines", func() {
14411559
})
14421560

14431561
It("should return the expected hash data for Rolling update strategy", func() {
1444-
Expect(ComputeAdditionalHashDataV2(pool)).To(Equal([]string{
1562+
Expect(ComputeAdditionalHashDataV2(pool, &workerConfig)).To(Equal([]string{
14451563
"true",
14461564
"10Gi",
14471565
"type1",

0 commit comments

Comments
 (0)