
Commit f4ad3ca

fixup! VPA: fix logs and cleanup TODOs according to review
Signed-off-by: Max Cao <[email protected]>
1 parent 0fdfd98 commit f4ad3ca


vertical-pod-autoscaler/pkg/updater/eviction/pods_eviction_restriction.go

Lines changed: 10 additions & 8 deletions
@@ -445,7 +445,6 @@ func (e *podsEvictionRestrictionImpl) CanInPlaceUpdate(pod *apiv1.Pod) bool {
 	if present {
 
 		// If our QoS class is guaranteed, we can't change the resources without a restart
-		// TODO(maxcao13): kubelet already prevents a resize of a guaranteed pod, so should we still check this early?
 		if pod.Status.QOSClass == apiv1.PodQOSGuaranteed {
 			klog.V(4).InfoS("Can't resize pod in-place", "pod", klog.KObj(pod), "qosClass", pod.Status.QOSClass)
 			return false
@@ -458,23 +457,26 @@ func (e *podsEvictionRestrictionImpl) CanInPlaceUpdate(pod *apiv1.Pod) bool {
 	}
 
 	noRestartPoliciesPopulated := true
+	isPodRestartPolicyNever := pod.Spec.RestartPolicy == apiv1.RestartPolicyNever
 
 	for _, container := range pod.Spec.Containers {
 		// If some of these are populated, we know it at least understands resizing
 		if len(container.ResizePolicy) > 0 {
 			noRestartPoliciesPopulated = false
 		}
 
-		// TODO(maxcao13): Do we have to check the policy resource too? i.e. if only memory is getting scaled, then only check the memory resize policy?
+		// TODO: We should check the containerResizePolicy resource and compare it with the requested update resources.
+		// For example, if we are only updating CPU, and the CPU container resize policy is NotRequired, but the memory
+		// resize policy is RestartContainer, then it's not going to accept an in-place disruptionless update, even though it should.
+		// For now, this function cannot actually see the resource(s) to be updated.
 		for _, policy := range container.ResizePolicy {
 			if policy.RestartPolicy != apiv1.NotRequired {
-				klog.InfoS("in-place resize of pod will cause container disruption, because of container resize policy", "pod", klog.KObj(pod), "container", container.Name, "restartPolicy", policy.RestartPolicy)
+				klog.V(4).InfoS("in-place resize of pod will cause container disruption, because of container resize policy", "pod", klog.KObj(pod), "container", container.Name, "containerResizeRestartPolicy", policy.RestartPolicy)
 				// TODO(jkyros): is there something that prevents this from happening elsewhere in the API?
-				if pod.Spec.RestartPolicy == apiv1.RestartPolicyNever {
-					klog.Warningf("in-place resize of %s not possible, container %s resize policy is %v but pod restartPolicy is %v", pod.Name, container.Name, policy.RestartPolicy, pod.Spec.RestartPolicy)
+				if isPodRestartPolicyNever {
+					klog.InfoS("in-place resize of pod not possible, container resize policy and pod restartPolicy conflict", "pod", klog.KObj(pod), "container", container.Name, "containerResizeRestartPolicy", policy.RestartPolicy, "podRestartPolicy", pod.Spec.RestartPolicy)
 					return false
 				}
-
 			}
 		}
 	}
@@ -591,7 +593,7 @@ func (e *podsEvictionRestrictionImpl) InPlaceUpdate(podToUpdate *apiv1.Pod, vpa
 			klog.V(4).InfoS("Patched pod annotations", "pod", klog.KObj(res), "patches", string(patch))
 		}
 	} else {
-		err := fmt.Errorf("no patches to apply to %s", podToUpdate.Name)
+		err := fmt.Errorf("no resource patches were calculated to apply")
 		klog.ErrorS(err, "Failed to patch pod", "pod", klog.KObj(podToUpdate))
 		return err
 	}
@@ -606,7 +608,7 @@ func (e *podsEvictionRestrictionImpl) InPlaceUpdate(podToUpdate *apiv1.Pod, vpa
 	if podToUpdate.Status.Phase == apiv1.PodRunning {
 		singleGroupStats, present := e.creatorToSingleGroupStatsMap[cr]
 		if !present {
-			klog.Errorf("Internal error - cannot find stats for replication group %v", cr)
+			klog.InfoS("Internal error - cannot find stats for replication group", "pod", klog.KObj(podToUpdate), "podReplicaCreator", cr)
 		} else {
 			singleGroupStats.inPlaceUpdating = singleGroupStats.inPlaceUpdating + 1
 			e.creatorToSingleGroupStatsMap[cr] = singleGroupStats
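The log changes in the last two hunks follow klog's structured-logging convention: the message stays constant and identifying details (pod, replica creator, resize policies) move into key/value pairs instead of being interpolated into a format string. A standalone illustration of that pattern, using a made-up pod type only to satisfy the accessors klog.KObj expects:

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

// pod is a stand-in object implementing the name/namespace accessors klog.KObj needs.
type pod struct{ Name, Namespace string }

func (p pod) GetName() string      { return p.Name }
func (p pod) GetNamespace() string { return p.Namespace }

func main() {
	defer klog.Flush()
	p := pod{Name: "web-0", Namespace: "default"}

	// Unstructured style being removed: object names baked into the format string.
	klog.Errorf("cannot find stats for pod %s", p.Name)

	// Structured style used in this commit: constant message, details as key/value pairs.
	klog.InfoS("cannot find stats for pod", "pod", klog.KObj(p))

	// With ErrorS the error is a separate first argument, so it stays machine-readable.
	err := fmt.Errorf("no resource patches were calculated to apply")
	klog.ErrorS(err, "Failed to patch pod", "pod", klog.KObj(p))
}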
