
Commit abc077e

Merge pull request kubernetes#6509 from x13n/master
Reduce log spam in AtomicResizeFilteringProcessor
2 parents 6c14a3a + a842d4f commit abc077e

2 files changed: +21 -2 lines changed


cluster-autoscaler/processors/nodes/scale_down_set_processor.go

Lines changed: 7 additions & 2 deletions
@@ -20,6 +20,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
 	klog "k8s.io/klog/v2"
 )
 
@@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {
 
 // GetNodesToRemove selects up to maxCount nodes for deletion, by selecting a first maxCount candidates
 func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
+	atomicQuota := klogx.NodesLoggingQuota()
+	standardQuota := klogx.NodesLoggingQuota()
 	nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
 	result := []simulator.NodeToBeRemoved{}
 	for _, node := range candidates {
@@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
 			continue
 		}
 		if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
-			klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
+			klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
 			nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
 		} else {
-			klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
+			klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
 			result = append(result, node)
 		}
 	}
+	klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
+	klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -atomicQuota.Left())
 	for nodeGroup, nodes := range nodesByGroup {
 		ngSize, err := nodeGroup.TargetSize()
 		if err != nil {
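
For context, the klogx quota pattern introduced above caps per-loop log output: each UpTo call logs only while the quota still has budget, and a single Over call afterwards summarizes how many messages were suppressed. The following is a minimal, self-contained sketch of that idea, assuming the quota semantics implied by the diff (Left() goes negative once the budget is exhausted); it is not the actual klogx implementation, and names like nodeNames and logQuota are illustrative only.

package main

import "fmt"

// quota is a simplified stand-in for klogx.Quota: it starts with a fixed
// budget and is charged once per candidate item.
type quota struct{ left int }

func (q *quota) Left() int { return q.left }

func main() {
	nodeNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}

	// Budget of 2 detailed lines for this sketch; the diff's default is
	// MaxNodesLogged = 20 at verbosity < 5.
	logQuota := &quota{left: 2}

	for _, name := range nodeNames {
		// Analogue of klogx.V(2).UpTo(quota).Infof(...): log while budget
		// remains, but always charge the quota.
		if logQuota.left > 0 {
			fmt.Printf("Considering node %s for scale down\n", name)
		}
		logQuota.left--
	}

	// Analogue of klogx.V(2).Over(quota).Infof(...): once the quota is
	// negative, emit one summary line instead of per-node output.
	if logQuota.Left() < 0 {
		fmt.Printf("Considering %d other nodes for scale down\n", -logQuota.Left())
	}
}

With the default limit of 20, a scale-down loop over hundreds of candidate nodes emits at most 20 detailed lines per category plus one summary line, instead of one line per node.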

cluster-autoscaler/utils/klogx/defaults.go

Lines changed: 14 additions & 0 deletions
@@ -25,6 +25,12 @@ const (
 	// MaxPodsLoggedV5 is the maximum number of pods for which we will
 	// log detailed information every loop at verbosity >= 5.
 	MaxPodsLoggedV5 = 1000
+	// MaxNodesLogged is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity < 5.
+	MaxNodesLogged = 20
+	// MaxNodesLoggedV5 is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity >= 5.
+	MaxNodesLoggedV5 = 1000
 )
 
 // PodsLoggingQuota returns a new quota with default limit for pods at current verbosity.
@@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
 	}
 	return NewLoggingQuota(MaxPodsLogged)
 }
+
+// NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
+func NodesLoggingQuota() *Quota {
+	if klog.V(5).Enabled() {
+		return NewLoggingQuota(MaxNodesLoggedV5)
+	}
+	return NewLoggingQuota(MaxNodesLogged)
+}
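
NodesLoggingQuota mirrors the existing PodsLoggingQuota helper and relies on the Quota type and NewLoggingQuota constructor defined elsewhere in the klogx package. That type is not part of this diff; a plausible shape, inferred only from how it is used in this commit, would be:

package klogx

// Quota is sketched here from its usage in this commit (NewLoggingQuota,
// Left, and the UpTo/Over helpers); the real definition lives elsewhere in
// cluster-autoscaler/utils/klogx and may differ.
type Quota struct {
	limit int
	left  int
}

// NewLoggingQuota returns a quota that allows n detailed log lines.
func NewLoggingQuota(n int) *Quota {
	return &Quota{limit: n, left: n}
}

// Left reports the remaining budget; it turns negative once more items than
// the limit have been charged, which is what the -quota.Left() calls in the
// processor change rely on to count suppressed messages.
func (q *Quota) Left() int {
	return q.left
}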
