Commit b8d78c6

VPA: Add unit tests for pruning grace period and containersPerPod field

Signed-off-by: Max Cao <[email protected]>

1 parent: 24c4eba

2 files changed: 218 additions & 1 deletion

vertical-pod-autoscaler/pkg/recommender/model/cluster_test.go

Lines changed: 126 additions & 0 deletions
@@ -33,6 +33,7 @@ import (
     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
     controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
+    vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )
 
 var (
@@ -453,6 +454,41 @@ func TestUpdatePodSelector(t *testing.T) {
     assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(testContainerID))
 }
 
+// Creates a VPA without a pruning grace period annotation, along with a matching pod and container.
+// Then adds a VPA pruningGracePeriod annotation such that the related containerAggregateState would
+// now be considered stale. Finally, updates the VPA to a long grace period to make the aggregate
+// active again. Verifies that stale aggregates can be pruned by applying and removing the annotation.
+func TestUpdatePruningGracePeriod(t *testing.T) {
+    shortGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "0"}
+    longGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "1000000"}
+    now := time.Now()
+
+    cluster := NewClusterState(testGcPeriod)
+    vpa := addTestVpa(cluster)
+    addTestPod(cluster)
+    addTestContainer(t, cluster)
+    testAggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
+
+    // Make sure the aggregate was created.
+    aggregateContainerState, aggregateStateExists := cluster.aggregateStateMap[testAggregateStateKey]
+    assert.True(t, aggregateStateExists)
+    assert.Contains(t, vpa.aggregateContainerStates, testAggregateStateKey)
+    // Check that the aggregate is not stale.
+    assert.False(t, aggregateContainerState.IsAggregateStale(now))
+
+    vpaObject := test.VerticalPodAutoscaler().WithNamespace(testVpaID.Namespace).
+        WithName(testVpaID.VpaName).WithContainer(testContainerID.ContainerName).WithAnnotations(shortGracePeriodAnnotations).WithTargetRef(testTargetRef).Get()
+    addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
+    // Check that the aggregate is stale.
+    assert.True(t, aggregateContainerState.IsAggregateStale(now))
+
+    vpaObject = test.VerticalPodAutoscaler().WithNamespace(testVpaID.Namespace).
+        WithName(testVpaID.VpaName).WithContainer(testContainerID.ContainerName).WithAnnotations(longGracePeriodAnnotations).WithTargetRef(testTargetRef).Get()
+    addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
+    // Check that the aggregate is not stale.
+    assert.False(t, aggregateContainerState.IsAggregateStale(now))
+}
+
 // Test setting ResourcePolicy and UpdatePolicy on adding or updating VPA object
 func TestAddOrUpdateVPAPolicies(t *testing.T) {
     testVpaBuilder := test.VerticalPodAutoscaler().WithName(testVpaID.VpaName).
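
The new test above exercises staleness purely through the VpaPruningGracePeriodAnnotation values "0" (immediately stale) and "1000000" (effectively never stale). As a mental model, here is a minimal, self-contained sketch of the staleness rule; the isStale helper and the interpretation of the annotation value as seconds are assumptions for illustration, not the actual IsAggregateStale implementation:

package main

import (
    "fmt"
    "time"
)

// isStale is a hypothetical, simplified stand-in for
// AggregateContainerState.IsAggregateStale: an aggregate counts as stale
// once no matching container has been observed for longer than the
// configured pruning grace period.
func isStale(lastSeen time.Time, gracePeriod time.Duration, now time.Time) bool {
    return now.Sub(lastSeen) > gracePeriod
}

func main() {
    now := time.Now()
    lastSeen := now.Add(-30 * time.Second)
    fmt.Println(isStale(lastSeen, 0, now))                   // true: a "0" grace period marks everything stale
    fmt.Println(isStale(lastSeen, 1000000*time.Second, now)) // false: a huge grace period keeps the aggregate active
}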
@@ -943,3 +979,93 @@ func TestVPAWithMatchingPods(t *testing.T) {
         })
     }
 }
+
+func TestSetVPAContainersPerPod(t *testing.T) {
+    cases := []struct {
+        name                     string
+        containers               []ContainerID
+        expectedContainersPerPod int
+    }{
+        {
+            name: "Single container",
+            containers: []ContainerID{
+                {testPodID, "container-1"},
+            },
+            expectedContainersPerPod: 1,
+        }, {
+            name: "Two containers",
+            containers: []ContainerID{
+                {testPodID, "container-1"},
+                {testPodID, "container-2"},
+            },
+            expectedContainersPerPod: 2,
+        }, {
+            name: "Three containers",
+            containers: []ContainerID{
+                {testPodID, "container-1"},
+                {testPodID, "container-2"},
+                {testPodID, "container-3"},
+            },
+            expectedContainersPerPod: 3,
+        },
+    }
+
+    for _, tc := range cases {
+        t.Run(tc.name, func(t *testing.T) {
+            cluster := NewClusterState(testGcPeriod)
+            vpa := addTestVpa(cluster)
+            pod := addTestPod(cluster)
+            for _, container := range tc.containers {
+                assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+            }
+            cluster.SetVPAContainersPerPod(pod, true)
+            assert.Equal(t, tc.expectedContainersPerPod, vpa.ContainersPerPod)
+        })
+    }
+}
+
+func TestSetVPAContainersPerPodIsHighWatermark(t *testing.T) {
+    cluster := NewClusterState(testGcPeriod)
+    vpa := addTestVpa(cluster)
+    pod := addTestPod(cluster)
+    containers := []ContainerID{
+        {testPodID, "container-1"},
+        {testPodID, "container-2"},
+        {testPodID, "container-3"},
+    }
+    expectedLen := len(containers)
+    for _, container := range containers {
+        assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+    }
+    cluster.SetVPAContainersPerPod(pod, true)
+    assert.Equal(t, len(containers), vpa.ContainersPerPod)
+
+    // It's possible we could be in the middle of a rollout, and the new ReplicaSet's pods could have only 2 containers.
+    // We should not update the containers per pod (high watermark) in this case, to preserve the recommendation splitting.
+    cluster.AddOrUpdatePod(testPodID3, testLabels, apiv1.PodRunning)
+    pod3 := cluster.Pods[testPodID3]
+    containers = []ContainerID{
+        {testPodID3, "container-1"},
+        {testPodID3, "container-2"},
+    }
+    for _, container := range containers {
+        assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+    }
+    cluster.SetVPAContainersPerPod(pod3, true)
+    assert.Equal(t, expectedLen, vpa.ContainersPerPod)
+
+    // But if we see more containers than the high watermark, we should update it.
+    cluster.AddOrUpdatePod(testPodID4, testLabels, apiv1.PodRunning)
+    pod4 := cluster.Pods[testPodID4]
+    containers = []ContainerID{
+        {testPodID4, "container-1"},
+        {testPodID4, "container-2"},
+        {testPodID4, "container-3"},
+        {testPodID4, "container-4"},
+    }
+    for _, container := range containers {
+        assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+    }
+    cluster.SetVPAContainersPerPod(pod4, true)
+    assert.Equal(t, len(containers), vpa.ContainersPerPod)
+}
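
Taken together, the two tests above pin down the ContainersPerPod semantics: the field records the number of containers observed in a pod and behaves as a high watermark, so a mid-rollout pod with fewer containers must not shrink it, while a pod with more containers raises it. A minimal sketch of that rule follows; the updateContainersPerPod helper is hypothetical, for illustration only, and is not the actual SetVPAContainersPerPod code:

package main

import "fmt"

// updateContainersPerPod applies the assumed high-watermark rule: the
// recorded containers-per-pod only ever rises, so recommendation splitting
// across containers stays stable during a rollout that temporarily runs
// pods with fewer containers.
func updateContainersPerPod(current, observed int) int {
    if observed > current {
        return observed
    }
    return current
}

func main() {
    perPod := 0
    // Mirrors the test: a 3-container pod, then a 2-container pod
    // mid-rollout, then a 4-container pod.
    for _, observed := range []int{3, 2, 4} {
        perPod = updateContainersPerPod(perPod, observed)
        fmt.Println(perPod) // prints 3, 3, 4
    }
}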

vertical-pod-autoscaler/pkg/recommender/model/vpa_test.go

Lines changed: 92 additions & 1 deletion
@@ -31,7 +31,6 @@ import (
 
 var (
     anyTime = time.Unix(0, 0)
-    // TODO(maxcao13): write tests for the pruningGracePeriod field
 )
 
 func TestMergeAggregateContainerState(t *testing.T) {
@@ -435,6 +434,100 @@ func TestDeleteAggregation(t *testing.T) {
     }
 }
 
+func TestDeleteAllAggregatesByContainerName(t *testing.T) {
+    cases := []struct {
+        name                          string
+        toDelete                      string
+        aggregateContainerStates      aggregateContainerStatesMap
+        initialContainerAggregates    ContainerNameToAggregateStateMap
+        expectedAggregatesLeft        int
+        expectedInitialAggregatesLeft int
+    }{
+        {
+            name:     "Deletes all aggregates since it's the only container name",
+            toDelete: "container",
+            aggregateContainerStates: aggregateContainerStatesMap{
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container",
+                    labelSetKey:   "labelSetKey",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container",
+                    labelSetKey:   "labelSetKey2",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+            },
+            initialContainerAggregates: ContainerNameToAggregateStateMap{
+                "container": &AggregateContainerState{},
+            },
+            expectedAggregatesLeft:        0,
+            expectedInitialAggregatesLeft: 0,
+        },
+        {
+            name:     "Deletes only the aggregates with the specified container name",
+            toDelete: "container",
+            aggregateContainerStates: aggregateContainerStatesMap{
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container",
+                    labelSetKey:   "labelSetKey",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container2",
+                    labelSetKey:   "labelSetKey2",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+            },
+            initialContainerAggregates: ContainerNameToAggregateStateMap{
+                "container":  &AggregateContainerState{},
+                "container2": &AggregateContainerState{},
+            },
+            expectedAggregatesLeft:        1,
+            expectedInitialAggregatesLeft: 1,
+        },
+        {
+            name:     "Deletes none of the aggregates since the container name doesn't match",
+            toDelete: "nonexistent",
+            aggregateContainerStates: aggregateContainerStatesMap{
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container",
+                    labelSetKey:   "labelSetKey",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+                aggregateStateKey{
+                    namespace:     "ns",
+                    containerName: "container2",
+                    labelSetKey:   "labelSetKey2",
+                    labelSetMap:   nil,
+                }: &AggregateContainerState{},
+            },
+            initialContainerAggregates: ContainerNameToAggregateStateMap{
+                "container":  &AggregateContainerState{},
+                "container2": &AggregateContainerState{},
+            },
+            expectedAggregatesLeft:        2,
+            expectedInitialAggregatesLeft: 2,
+        },
+    }
+    for _, tc := range cases {
+        t.Run(tc.name, func(t *testing.T) {
+            vpa := Vpa{
+                aggregateContainerStates:        tc.aggregateContainerStates,
+                ContainersInitialAggregateState: tc.initialContainerAggregates,
+            }
+            vpa.DeleteAllAggregatesByContainerName(tc.toDelete)
+            assert.Equal(t, tc.expectedAggregatesLeft, len(vpa.aggregateContainerStates))
+            assert.Equal(t, tc.expectedInitialAggregatesLeft, len(vpa.ContainersInitialAggregateState))
+        })
+    }
+}
+
 func TestSetResourcePolicy(t *testing.T) {
     scalingModeAuto := vpa_types.ContainerScalingModeAuto
     scalingModeOff := vpa_types.ContainerScalingModeOff
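
TestDeleteAllAggregatesByContainerName above only inspects the sizes of the VPA's aggregate maps after deletion. Conceptually the operation is a filter over map keys by container name; the following self-contained sketch illustrates that shape (the aggregateKey type and deleteAllByContainerName helper are simplified stand-ins, not the recommender's actual types or method):

package main

import "fmt"

// aggregateKey mirrors the shape of the test's aggregateStateKey literals;
// the real key also carries a label set.
type aggregateKey struct {
    namespace     string
    containerName string
}

// deleteAllByContainerName drops every aggregate whose key names the given
// container, regardless of namespace or labels.
func deleteAllByContainerName(states map[aggregateKey]struct{}, containerName string) {
    for key := range states {
        if key.containerName == containerName {
            delete(states, key)
        }
    }
}

func main() {
    states := map[aggregateKey]struct{}{
        {namespace: "ns", containerName: "container"}:  {},
        {namespace: "ns", containerName: "container2"}: {},
    }
    deleteAllByContainerName(states, "container")
    fmt.Println(len(states)) // 1: only the "container2" aggregate remains
}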
