 // than pod with 100M current memory and 150M recommendation (100% increase vs 50% increase)
 type UpdatePriorityCalculator struct {
 	vpa                     *vpa_types.VerticalPodAutoscaler
-	pods                    []PrioritizedPod
+	pods                    []prioritizedPod
 	config                  *UpdateConfig
 	recommendationProcessor vpa_api_util.RecommendationProcessor
 	priorityProcessor       PriorityProcessor
@@ -117,8 +117,6 @@ func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, now time.Time) {
 		}
 	}
 
-	disruptionlessRecommendation := calc.CalculateDisruptionFreeActions(pod, processedRecommendation)
-
 	// The update is allowed in following cases:
 	// - the request is outside the recommended range for some container.
 	// - the pod lives for at least 24h and the resource diff is >= MinChangePriority.
@@ -129,31 +127,12 @@ func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, now time.Time) {
 			klog.V(4).InfoS("Not updating pod, missing field pod.Status.StartTime", "pod", klog.KObj(pod))
 			return
 		}
-		// TODO(maxcao13): hopefully this doesn't break anything but we switch the order so that significant change is checked first before lifetime
-		// this way we don't in-place scale it for insignificant change, else we would mark it disruptionless and still have an in-place update
-		if updatePriority.ResourceDiff < calc.config.MinChangePriority {
-			klog.V(4).InfoS("Not updating pod, resource diff too low", "pod", klog.KObj(pod), "updatePriority", updatePriority)
+		if now.Before(pod.Status.StartTime.Add(*podLifetimeUpdateThreshold)) {
+			klog.V(4).InfoS("Not updating a short-lived pod, request within recommended range", "pod", klog.KObj(pod))
 			return
 		}
-		if now.Before(pod.Status.StartTime.Add(*podLifetimeUpdateThreshold)) {
-			// TODO(jkyros): do we need an in-place update threshold arg?
-			// If our recommendations are disruptionless, we can bypass the threshold limit
-			if len(disruptionlessRecommendation.ContainerRecommendations) > 0 {
-				klog.V(2).InfoS("Short-lived, but pod still accepted for disruptionless in-place update",
-					"pod", klog.KObj(pod),
-					"numContainers", len(pod.Spec.Containers),
-					"resourceDiff", updatePriority.ResourceDiff,
-					"fractionOfDisruptionlessRecommendations", len(disruptionlessRecommendation.ContainerRecommendations)/len(processedRecommendation.ContainerRecommendations),
-				)
-				updatePriority.Disruptionless = true
-				calc.pods = append(calc.pods, PrioritizedPod{
-					pod:            pod,
-					priority:       updatePriority,
-					recommendation: disruptionlessRecommendation})
-			} else {
-				// we cannot perform this update disruption-free, so do not update this pod's resources
-				klog.V(4).InfoS("Not updating a short-lived pod, request within recommended range", "pod", klog.KObj(pod))
-			}
+		if updatePriority.ResourceDiff < calc.config.MinChangePriority {
+			klog.V(4).InfoS("Not updating pod, resource diff too low", "pod", klog.KObj(pod), "updatePriority", updatePriority)
 			return
 		}
 	}
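
Note for readers tracing the control flow: with the TODO-era reordering reverted above, a pod whose requests sit inside the recommended range is skipped first when it is short-lived and only then checked against MinChangePriority. The standalone sketch below mirrors that order; shouldUpdate, the 24h lifetime, and the 0.1 minimum are illustrative stand-ins, not the package's actual API or defaults.

// Sketch only: mirrors the gating order restored in the hunk above; the
// enclosing "request within recommended range" condition is implied by the
// comment in the previous hunk.
package main

import (
	"fmt"
	"time"
)

func shouldUpdate(outsideRecommendedRange bool, lifetime time.Duration, resourceDiff float64) bool {
	const lifetimeThreshold = 24 * time.Hour // stands in for podLifetimeUpdateThreshold
	const minChangePriority = 0.1            // stands in for calc.config.MinChangePriority
	if !outsideRecommendedRange {
		if lifetime < lifetimeThreshold {
			return false // short-lived pod, request within recommended range
		}
		if resourceDiff < minChangePriority {
			return false // resource diff too low
		}
	}
	return true
}

func main() {
	fmt.Println(shouldUpdate(false, time.Hour, 0.5))     // false: in range and too young
	fmt.Println(shouldUpdate(false, 48*time.Hour, 0.05)) // false: diff below minimum
	fmt.Println(shouldUpdate(true, time.Hour, 0.0))      // true: outside recommended range
	fmt.Println(shouldUpdate(false, 48*time.Hour, 0.5))  // true: old enough, significant diff
}
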
@@ -164,30 +143,12 @@ func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, now time.Time) {
 		return
 	}
 	klog.V(2).InfoS("Pod accepted for update", "pod", klog.KObj(pod), "updatePriority", updatePriority.ResourceDiff, "processedRecommendations", calc.GetProcessedRecommendationTargets(processedRecommendation))
-	calc.pods = append(calc.pods, PrioritizedPod{
+	calc.pods = append(calc.pods, prioritizedPod{
 		pod:            pod,
 		priority:       updatePriority,
 		recommendation: processedRecommendation})
 }
 
-// GetSortedPrioritizedPods returns a list of prioritized pods ordered by update priority (highest update priority first). Used instead
-// of GetSortedPods when we need access to the priority information
-func (calc *UpdatePriorityCalculator) GetSortedPrioritizedPods(admission PodEvictionAdmission) []*PrioritizedPod {
-	sort.Sort(byPriorityDesc(calc.pods))
-
-	//result := []*apiv1.Pod{}
-	result := []*PrioritizedPod{}
-	for num, podPrio := range calc.pods {
-		if admission.Admit(podPrio.pod, podPrio.recommendation) {
-			result = append(result, &calc.pods[num])
-		} else {
-			klog.V(2).InfoS("Pod removed from update queue by PodEvictionAdmission", "pod", klog.KObj(podPrio.Pod()))
-		}
-	}
-
-	return result
-}
-
 // GetSortedPods returns a list of pods ordered by update priority (highest update priority first)
 // TODO(maxcao13): Not used for now, deprecated by GetSortedUpdatablePods; remove later when fixing tests
 func (calc *UpdatePriorityCalculator) GetSortedPods(admission PodEvictionAdmission) []*apiv1.Pod {
@@ -247,25 +208,12 @@ func parseVpaObservedContainers(pod *apiv1.Pod) (bool, sets.Set[string]) {
 	return hasObservedContainers, vpaContainerSet
 }
 
-// PrioritizedPod contains the priority and recommendation details for a pod.
-// TODO(jkyros): I made this public, but there may be a cleaner way
-type PrioritizedPod struct {
+type prioritizedPod struct {
 	pod            *apiv1.Pod
 	priority       PodPriority
 	recommendation *vpa_types.RecommendedPodResources
 }
 
-// IsDisruptionless returns the disruptionless status of the underlying pod priority
-// TODO(jkyros): scope issues, maybe not the best place to put Disruptionless
-func (p PrioritizedPod) IsDisruptionless() bool {
-	return p.priority.Disruptionless
-}
-
-// Pod returns the underlying private pod
-func (p PrioritizedPod) Pod() *apiv1.Pod {
-	return p.pod
-}
-
 // PodPriority contains data for a pod update that can be used to prioritize between updates.
 type PodPriority struct {
 	// Is any container outside of the recommended range.
@@ -274,11 +222,9 @@ type PodPriority struct {
 	ScaleUp bool
 	// Relative difference between the total requested and total recommended resources.
 	ResourceDiff float64
-	// Is this update disruptionless
-	Disruptionless bool
 }
 
-type byPriorityDesc []PrioritizedPod
+type byPriorityDesc []prioritizedPod
 
 func (list byPriorityDesc) Len() int {
 	return len(list)
@@ -306,67 +252,3 @@ func (p PodPriority) Less(other PodPriority) bool {
 	// 2. A pod with larger value of resourceDiff takes precedence.
 	return p.ResourceDiff < other.ResourceDiff
 }
-
-// CalculateDisruptionFreeActions calculates the set of actions we think we can perform without disruption based on the pod/container resize/restart
-// policies and returns that set of actions.
-func (calc *UpdatePriorityCalculator) CalculateDisruptionFreeActions(pod *apiv1.Pod, recommendation *vpa_types.RecommendedPodResources) *vpa_types.RecommendedPodResources {
-
-	var disruptionlessRecommendation = &vpa_types.RecommendedPodResources{}
-
-	for _, container := range pod.Spec.Containers {
-		// If we don't have a resize policy, we can't check it
-		if len(container.ResizePolicy) == 0 {
-			continue
-		}
-
-		// So we get whatever the recommendation was for this container
-		resourceRec := getRecommendationForContainerName(container.Name, recommendation)
-		// If we didn't find a recommendation for this container, we don't have anything to do
-		if resourceRec == nil {
-			continue
-		}
-		// Then we go through all the resource recommendations it has
-		for resource := range resourceRec.Target {
-			// And we look up what the restart policy is for those resources
-			resourceRestartPolicy := getRestartPolicyForResource(resource, container.ResizePolicy)
-			// If we don't have one, that's probably bad
-			if resourceRestartPolicy == nil {
-				continue
-			}
-			// If we do have one, and it's disruptive, then we know this won't work
-			if *resourceRestartPolicy != apiv1.NotRequired {
-				continue
-			}
-
-		}
-
-		// And if we made it here, we should theoretically be able to do this without disruption
-		disruptionlessRecommendation.ContainerRecommendations = append(disruptionlessRecommendation.ContainerRecommendations, *resourceRec)
-
-	}
-
-	return disruptionlessRecommendation
-}
-
-// getRecommendationForContainerName searches through the list of ContainerRecommendations until it finds one matching the named container. Used
-// to match up containers with their recommendations (we have container, we want resource recommendation)
-func getRecommendationForContainerName(name string, recommendation *vpa_types.RecommendedPodResources) *vpa_types.RecommendedContainerResources {
-	for _, recommendationContainer := range recommendation.ContainerRecommendations {
-		if recommendationContainer.ContainerName == name {
-			return &recommendationContainer
-		}
-	}
-	return nil
-}
-
-// getRestartPolicyForResource searches through the list of resources in the resize policy until it finds the one matching the named resource. Used
-// to match up restart policies with our resource recommendations (we have resource, we want policy).
-func getRestartPolicyForResource(resourceName apiv1.ResourceName, policy []apiv1.ContainerResizePolicy) *apiv1.ResourceResizeRestartPolicy {
-	// TODO(jkyros): can there be duplicate policies for resources? we just take the first one now
-	for _, resizePolicy := range policy {
-		if resizePolicy.ResourceName == resourceName {
-			return &resizePolicy.RestartPolicy
-		}
-	}
-	return nil
-}
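
The ordering logic that remains (byPriorityDesc over prioritizedPod, with PodPriority.Less giving precedence to a larger ResourceDiff) matches the example in the file's header comment: 10M of current memory with a 20M recommendation (a 100% increase) outranks 100M with a 150M recommendation (a 50% increase). A minimal self-contained illustration of that rule follows; the examplePod struct and the sort.Slice call are stand-ins, not the package's comparator.

// Standalone illustration of rule 2 of PodPriority.Less above: a larger
// ResourceDiff sorts first. Values follow the example in the header comment.
package main

import (
	"fmt"
	"sort"
)

type examplePod struct {
	name         string
	resourceDiff float64 // relative difference between requested and recommended resources
}

func main() {
	pods := []examplePod{
		{name: "100M-requested-150M-recommended", resourceDiff: 0.5},
		{name: "10M-requested-20M-recommended", resourceDiff: 1.0},
	}
	// Descending by resourceDiff, i.e. highest update priority first.
	sort.Slice(pods, func(i, j int) bool {
		return pods[i].resourceDiff > pods[j].resourceDiff
	})
	fmt.Println(pods) // [{10M-requested-20M-recommended 1} {100M-requested-150M-recommended 0.5}]
}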