@@ -33,6 +33,7 @@ import (
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 	controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher"
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
+	vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )
 
 var (
@@ -453,6 +454,41 @@ func TestUpdatePodSelector(t *testing.T) {
 	assert.Contains(t, vpa.aggregateContainerStates, cluster.aggregateStateKeyForContainerID(testContainerID))
 }
 
+// Creates a VPA without a pruning grace period annotation, along with a matching pod and container.
+// Then adds a pruningGracePeriod annotation to the VPA such that the related containerAggregateState
+// would now be considered stale. Finally, updates the VPA with a long grace period to make the
+// aggregate active again. This verifies that aggregates can be made stale (and thus pruneable),
+// and then active again, by updating the annotation.
+func TestUpdatePruningGracePeriod(t *testing.T) {
+	shortGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "0"}
+	longGracePeriodAnnotations := vpaAnnotationsMap{vpa_api_util.VpaPruningGracePeriodAnnotation: "1000000"}
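+	// Assumed here: "0" makes any aggregate immediately stale, while "1000000" is long
+	// enough that the aggregate stays active for every timestamp this test checks.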
+	now := time.Now()
+
+	cluster := NewClusterState(testGcPeriod)
+	vpa := addTestVpa(cluster)
+	addTestPod(cluster)
+	addTestContainer(t, cluster)
+	testAggregateStateKey := cluster.aggregateStateKeyForContainerID(testContainerID)
+
+	// Make sure the aggregate was created.
+	aggregateContainerState, aggregateStateExists := cluster.aggregateStateMap[testAggregateStateKey]
+	assert.True(t, aggregateStateExists)
+	assert.Contains(t, vpa.aggregateContainerStates, testAggregateStateKey)
+	// Check that the aggregate is not stale.
+	assert.False(t, aggregateContainerState.IsAggregateStale(now))
+
+	vpaObject := test.VerticalPodAutoscaler().WithNamespace(testVpaID.Namespace).
+		WithName(testVpaID.VpaName).WithContainer(testContainerID.ContainerName).WithAnnotations(shortGracePeriodAnnotations).WithTargetRef(testTargetRef).Get()
+	addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
+	// Check that the aggregate is stale.
+	assert.True(t, aggregateContainerState.IsAggregateStale(now))
+
+	vpaObject = test.VerticalPodAutoscaler().WithNamespace(testVpaID.Namespace).
+		WithName(testVpaID.VpaName).WithContainer(testContainerID.ContainerName).WithAnnotations(longGracePeriodAnnotations).WithTargetRef(testTargetRef).Get()
+	addVpaObject(cluster, testVpaID, vpaObject, testSelectorStr)
+	// Check that the aggregate is no longer stale.
+	assert.False(t, aggregateContainerState.IsAggregateStale(now))
+}
+
 // Test setting ResourcePolicy and UpdatePolicy on adding or updating VPA object
 func TestAddOrUpdateVPAPolicies(t *testing.T) {
 	testVpaBuilder := test.VerticalPodAutoscaler().WithName(testVpaID.VpaName).
@@ -943,3 +979,93 @@ func TestVPAWithMatchingPods(t *testing.T) {
 		})
 	}
 }
+
+func TestSetVPAContainersPerPod(t *testing.T) {
+	cases := []struct {
+		name                     string
+		containers               []ContainerID
+		expectedContainersPerPod int
+	}{
+		{
+			name: "Single container",
+			containers: []ContainerID{
+				{testPodID, "container-1"},
+			},
+			expectedContainersPerPod: 1,
+		}, {
+			name: "Two containers",
+			containers: []ContainerID{
+				{testPodID, "container-1"},
+				{testPodID, "container-2"},
+			},
+			expectedContainersPerPod: 2,
+		}, {
+			name: "Three containers",
+			containers: []ContainerID{
+				{testPodID, "container-1"},
+				{testPodID, "container-2"},
+				{testPodID, "container-3"},
+			},
+			expectedContainersPerPod: 3,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			cluster := NewClusterState(testGcPeriod)
+			vpa := addTestVpa(cluster)
+			pod := addTestPod(cluster)
+			for _, container := range tc.containers {
+				assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+			}
+			cluster.SetVPAContainersPerPod(pod, true)
+			assert.Equal(t, tc.expectedContainersPerPod, vpa.ContainersPerPod)
+		})
+	}
+}
+
+func TestSetVPAContainersPerPodIsHighWatermark(t *testing.T) {
+	cluster := NewClusterState(testGcPeriod)
+	vpa := addTestVpa(cluster)
+	pod := addTestPod(cluster)
+	containers := []ContainerID{
+		{testPodID, "container-1"},
+		{testPodID, "container-2"},
+		{testPodID, "container-3"},
+	}
+	expectedLen := len(containers)
+	for _, container := range containers {
+		assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+	}
+	cluster.SetVPAContainersPerPod(pod, true)
+	assert.Equal(t, len(containers), vpa.ContainersPerPod)
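+	// The high watermark is now 3 containers per pod.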
+
+	// We could be in the middle of a rollout, and the new ReplicaSet's pods could have
+	// only 2 containers. In that case the containers-per-pod high watermark must not be
+	// lowered, so that recommendation splitting is preserved. (A sketch of the assumed
+	// rule follows this test.)
+	cluster.AddOrUpdatePod(testPodID3, testLabels, apiv1.PodRunning)
+	pod3 := cluster.Pods[testPodID3]
+	containers = []ContainerID{
+		{testPodID3, "container-1"},
+		{testPodID3, "container-2"},
+	}
+	for _, container := range containers {
+		assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+	}
+	cluster.SetVPAContainersPerPod(pod3, true)
+	assert.Equal(t, expectedLen, vpa.ContainersPerPod)
+
+	// But if a pod has more containers than the current high watermark, it should be raised.
+	cluster.AddOrUpdatePod(testPodID4, testLabels, apiv1.PodRunning)
+	pod4 := cluster.Pods[testPodID4]
+	containers = []ContainerID{
+		{testPodID4, "container-1"},
+		{testPodID4, "container-2"},
+		{testPodID4, "container-3"},
+		{testPodID4, "container-4"},
+	}
+	for _, container := range containers {
+		assert.NoError(t, cluster.AddOrUpdateContainer(container, testRequest))
+	}
+	cluster.SetVPAContainersPerPod(pod4, true)
+	assert.Equal(t, len(containers), vpa.ContainersPerPod)
+}
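+
+// Minimal sketch of the high-watermark rule the assertions above rely on. This is
+// an assumption for illustration only, inferred from the test's observed behavior;
+// the real logic lives in SetVPAContainersPerPod:
+//
+//	if containerCount > vpa.ContainersPerPod {
+//		vpa.ContainersPerPod = containerCount
+//	}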