51 changes: 39 additions & 12 deletions cluster-autoscaler/context/autoscaling_context.go
@@ -17,6 +17,10 @@ limitations under the License.
package context

import (
"time"

appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
@@ -29,7 +33,9 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
draprovider "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/provider"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
"k8s.io/client-go/informers"
kube_client "k8s.io/client-go/kubernetes"
kube_record "k8s.io/client-go/tools/record"
@@ -61,10 +67,29 @@ type AutoscalingContext struct {
RemainingPdbTracker pdb.RemainingPdbTracker
// ClusterStateRegistry tracks the health of the node groups and pending scale-ups and scale-downs
ClusterStateRegistry *clusterstate.ClusterStateRegistry
//ProvisionRequstScaleUpMode indicates whether ClusterAutoscaler tries to accommodate ProvisioningRequest in current scale up iteration.
// ProvisioningRequestScaleUpMode indicates whether Cluster Autoscaler tries to accommodate ProvisioningRequests in the current scale-up iteration.
ProvisioningRequestScaleUpMode bool
// DraProvider is the provider for dynamic resources allocation.
DraProvider *draprovider.Provider
// TemplateNodeInfoRegistry allows accessing template node infos.
TemplateNodeInfoRegistry TemplateNodeInfoRegistry
}

// TemplateNodeInfoRegistry is the interface for getting template node infos.
// All methods are thread-safe and can be called from separate goroutines.
type TemplateNodeInfoRegistry interface {
// GetNodeInfo returns a template NodeInfo for a given NodeGroup, as computed by TemplateNodeInfoProvider.
// The result is read-only.
GetNodeInfo(nodeGroupId string) (*framework.NodeInfo, bool)
// GetNodeInfos returns a map of all template NodeInfos, as computed by TemplateNodeInfoProvider.
// The map itself can be modified, but its values are read-only.
GetNodeInfos() map[string]*framework.NodeInfo
// Recompute updates the results/cache.
// It happens near the beginning of the main Cluster Autoscaler loop.
// The results are cached until the next Recompute() call.
// The getters can be used by logic that runs before the Recompute() call in the main CA loop,
// but such callers have to handle missing results during the first CA loop.
Recompute(autoscalingCtx *AutoscalingContext, nodes []*apiv1.Node, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig, currentTime time.Time) errors.AutoscalerError
}
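For orientation, here is a minimal sketch (not part of this change) of how a consumer of the new interface might use it: call Recompute once near the start of a loop iteration, then read the template for a specific node group and handle the case where nothing has been computed yet. The function and parameter names are illustrative assumptions, not code from this PR.

```go
// Sketch of a hypothetical consumer; not part of this PR.
package example

import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"

	ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// refreshAndLookup recomputes the cached templates for the current loop
// iteration and then looks up the template for one node group.
// The boolean mirrors GetNodeInfo: false means no template is cached,
// e.g. when a getter runs before the first Recompute call.
func refreshAndLookup(
	autoscalingCtx *ca_context.AutoscalingContext,
	nodes []*apiv1.Node,
	daemonsets []*appsv1.DaemonSet,
	taintConfig taints.TaintConfig,
	nodeGroupID string,
) (*framework.NodeInfo, bool) {
	reg := autoscalingCtx.TemplateNodeInfoRegistry
	if err := reg.Recompute(autoscalingCtx, nodes, daemonsets, taintConfig, time.Now()); err != nil {
		return nil, false
	}
	return reg.GetNodeInfo(nodeGroupID)
}
```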

// AutoscalingKubeClients contains all Kubernetes API clients,
@@ -112,19 +137,21 @@ func NewAutoscalingContext(
remainingPdbTracker pdb.RemainingPdbTracker,
clusterStateRegistry *clusterstate.ClusterStateRegistry,
draProvider *draprovider.Provider,
templateNodeInfoRegistry TemplateNodeInfoRegistry,
) *AutoscalingContext {
return &AutoscalingContext{
AutoscalingOptions: options,
CloudProvider: cloudProvider,
AutoscalingKubeClients: *autoscalingKubeClients,
FrameworkHandle: fwHandle,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: expanderStrategy,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
RemainingPdbTracker: remainingPdbTracker,
ClusterStateRegistry: clusterStateRegistry,
DraProvider: draProvider,
AutoscalingOptions: options,
CloudProvider: cloudProvider,
AutoscalingKubeClients: *autoscalingKubeClients,
FrameworkHandle: fwHandle,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: expanderStrategy,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
RemainingPdbTracker: remainingPdbTracker,
ClusterStateRegistry: clusterStateRegistry,
DraProvider: draProvider,
TemplateNodeInfoRegistry: templateNodeInfoRegistry,
}
}

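Most test call sites below simply pass nil for the new constructor argument, which works as long as the test never touches the context's TemplateNodeInfoRegistry; the planner tests instead wire in the registry returned by processorstest.NewTestProcessors. A test that needs a standalone registry could also use a minimal fake along these lines; this is a hypothetical sketch, not part of the PR.

```go
// Hypothetical test double for TemplateNodeInfoRegistry; not part of this PR.
package example

import (
	"time"

	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"

	ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// fakeTemplateNodeInfoRegistry serves a fixed set of templates and does no
// work on Recompute. The compile-time assertion below makes the fake fail to
// build if the interface ever gains a method.
var _ ca_context.TemplateNodeInfoRegistry = &fakeTemplateNodeInfoRegistry{}

type fakeTemplateNodeInfoRegistry struct {
	nodeInfos map[string]*framework.NodeInfo
}

func (f *fakeTemplateNodeInfoRegistry) GetNodeInfo(nodeGroupID string) (*framework.NodeInfo, bool) {
	ni, ok := f.nodeInfos[nodeGroupID]
	return ni, ok
}

func (f *fakeTemplateNodeInfoRegistry) GetNodeInfos() map[string]*framework.NodeInfo {
	// Return a copy so callers may modify the returned map freely.
	out := make(map[string]*framework.NodeInfo, len(f.nodeInfos))
	for k, v := range f.nodeInfos {
		out[k] = v
	}
	return out
}

func (f *fakeTemplateNodeInfoRegistry) Recompute(*ca_context.AutoscalingContext, []*apiv1.Node, []*appsv1.DaemonSet, taints.TaintConfig, time.Time) errors.AutoscalerError {
	return nil // templates are static in this fake
}
```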
4 changes: 2 additions & 2 deletions cluster-autoscaler/core/scaledown/actuation/actuator_test.go
@@ -1229,7 +1229,7 @@ func runStartDeletionTest(t *testing.T, tc startDeletionTestCase, force bool) {
}

registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
@@ -1542,7 +1542,7 @@ func TestStartDeletionInBatchBasic(t *testing.T) {
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
pdbLister := kube_util.NewTestPodDisruptionBudgetLister([]*policyv1.PodDisruptionBudget{})
registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, nil, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
(changes to another file; file name not shown in this view)
@@ -35,7 +35,7 @@ import (

func TestAddNodeToBucket(t *testing.T) {
provider := testprovider.NewTestCloudProviderBuilder().Build()
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, nil, nil, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, nil, nil, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
@@ -158,7 +158,7 @@ func TestRemove(t *testing.T) {
return true, obj, nil
})

autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
16 changes: 8 additions & 8 deletions cluster-autoscaler/core/scaledown/actuation/drain_test.go
@@ -141,7 +141,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
provider.AddNode("ng1", n1)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)

autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil, nil)
assert.NoError(t, err)

clustersnapshot.InitializeClusterSnapshotOrDie(t, autoscalingCtx.ClusterSnapshot, []*apiv1.Node{n1}, dsPods)
@@ -208,7 +208,7 @@ func TestDrainNodeWithPods(t *testing.T) {
MaxPodEvictionTime: 5 * time.Second,
DaemonSetEvictionForOccupiedNodes: true,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -272,7 +272,7 @@ func TestDrainNodeWithPodsWithRescheduled(t *testing.T) {
MaxGracefulTerminationSec: 20,
MaxPodEvictionTime: 5 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -341,7 +341,7 @@ func TestDrainNodeWithPodsWithRetries(t *testing.T) {
MaxPodEvictionTime: 5 * time.Second,
DaemonSetEvictionForOccupiedNodes: true,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -416,7 +416,7 @@ func TestDrainNodeWithPodsDaemonSetEvictionFailure(t *testing.T) {
MaxGracefulTerminationSec: 20,
MaxPodEvictionTime: 0 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -478,7 +478,7 @@ func TestDrainNodeWithPodsEvictionFailure(t *testing.T) {
MaxGracefulTerminationSec: 20,
MaxPodEvictionTime: 0 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)
r := evRegister{}
legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -559,7 +559,7 @@ func TestDrainForceNodeWithPodsEvictionFailure(t *testing.T) {
MaxGracefulTerminationSec: 20,
MaxPodEvictionTime: 0 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)
r := evRegister{}
legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
@@ -621,7 +621,7 @@ func TestDrainWithPodsNodeDisappearanceFailure(t *testing.T) {
MaxGracefulTerminationSec: 0,
MaxPodEvictionTime: 0 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

legacyFlagDrainConfig := SingleRuleDrainConfig(autoscalingCtx.MaxGracefulTerminationSec)
(changes to another file; file name not shown in this view)
@@ -147,7 +147,7 @@ func TestScheduleDeletion(t *testing.T) {
t.Fatalf("Couldn't create daemonset lister")
}
registry := kube_util.NewListerRegistry(nil, nil, podLister, pdbLister, dsLister, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, fakeClient, registry, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
(changes to another file; file name not shown in this view)
@@ -73,7 +73,7 @@ func TestPriorityEvictor(t *testing.T) {
MaxGracefulTerminationSec: 20,
MaxPodEvictionTime: 5 * time.Second,
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, fakeClient, nil, nil, nil, nil, nil)
assert.NoError(t, err)

evictor := Evictor{
4 changes: 2 additions & 2 deletions cluster-autoscaler/core/scaledown/actuation/softtaint_test.go
@@ -69,7 +69,7 @@ func TestSoftTaintUpdate(t *testing.T) {
}
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)

actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil, nil)
assert.NoError(t, err)

// Test no superfluous nodes
@@ -153,7 +153,7 @@ func TestSoftTaintTimeLimit(t *testing.T) {
}
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)

actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
actx, err := test.NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil, nil)
assert.NoError(t, err)

// Test bulk taint
2 changes: 1 addition & 1 deletion cluster-autoscaler/core/scaledown/budgets/budgets_test.go
@@ -451,7 +451,7 @@ func TestCropNodesToBudgets(t *testing.T) {
NodeDeleteDelayAfterTaint: 1 * time.Second,
}

autoscalingCtx, err := test.NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
autoscalingCtx, err := test.NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil, nil)
assert.NoError(t, err)
ndt := deletiontracker.NewNodeDeletionTracker(1 * time.Hour)
for i := 0; i < tc.emptyDeletionsInProgress; i++ {
(changes to another file; file name not shown in this view)
@@ -229,7 +229,7 @@ func TestFilterOutUnremovable(t *testing.T) {
for _, n := range tc.nodes {
provider.AddNode("ng1", n)
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil, nil)
if err != nil {
t.Fatalf("Could not create autoscaling context: %v", err)
}
28 changes: 18 additions & 10 deletions cluster-autoscaler/core/scaledown/planner/planner_test.go
@@ -493,17 +493,19 @@ func TestUpdateClusterState(t *testing.T) {
for _, node := range tc.nodes {
provider.AddNode("ng1", node)
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{
opts := config.AutoscalingOptions{
NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
ScaleDownUnneededTime: 10 * time.Minute,
},
ScaleDownSimulationTimeout: 1 * time.Second,
MaxScaleDownParallelism: 10,
}, &fake.Clientset{}, registry, provider, nil, nil)
}
processors, templateNodeInfoRegistry := processorstest.NewTestProcessors(opts)
autoscalingCtx, err := NewScaleTestAutoscalingContext(opts, &fake.Clientset{}, registry, provider, nil, nil, templateNodeInfoRegistry)
assert.NoError(t, err)
clustersnapshot.InitializeClusterSnapshotOrDie(t, autoscalingCtx.ClusterSnapshot, tc.nodes, tc.pods)
deleteOptions := options.NodeDeleteOptions{}
p := New(&autoscalingCtx, processorstest.NewTestProcessors(&autoscalingCtx), deleteOptions, nil)
p := New(&autoscalingCtx, processors, deleteOptions, nil)
p.eligibilityChecker = &fakeEligibilityChecker{eligible: asMap(tc.eligible)}
if tc.isSimulationTimeout {
autoscalingCtx.AutoscalingOptions.ScaleDownSimulationTimeout = 1 * time.Second
@@ -689,17 +691,19 @@ func TestUpdateClusterStatUnneededNodesLimit(t *testing.T) {
for _, node := range nodes {
provider.AddNode("ng1", node)
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{
autoscalingOpts := config.AutoscalingOptions{
NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
ScaleDownUnneededTime: tc.maxUnneededTime,
},
ScaleDownSimulationTimeout: 1 * time.Hour,
MaxScaleDownParallelism: tc.maxParallelism,
}, &fake.Clientset{}, nil, provider, nil, nil)
}
processors, templateNodeInfoRegistry := processorstest.NewTestProcessors(autoscalingOpts)
autoscalingCtx, err := NewScaleTestAutoscalingContext(autoscalingOpts, &fake.Clientset{}, nil, provider, nil, nil, templateNodeInfoRegistry)
assert.NoError(t, err)
clustersnapshot.InitializeClusterSnapshotOrDie(t, autoscalingCtx.ClusterSnapshot, nodes, nil)
deleteOptions := options.NodeDeleteOptions{}
p := New(&autoscalingCtx, processorstest.NewTestProcessors(&autoscalingCtx), deleteOptions, nil)
p := New(&autoscalingCtx, processors, deleteOptions, nil)
p.eligibilityChecker = &fakeEligibilityChecker{eligible: asMap(nodeNames(nodes))}
p.minUpdateInterval = tc.updateInterval
p.unneededNodes.Update(&autoscalingCtx, previouslyUnneeded, time.Now())
@@ -817,6 +821,7 @@ func TestNewPlannerWithExistingDeletionCandidateNodes(t *testing.T) {
provider.AddNode("ng1", node)
}

processors, templateNodeInfoRegistry := processorstest.NewTestProcessors(autoscalingOptions)
autoscalingCtx, err := NewScaleTestAutoscalingContext(
autoscalingOptions,
&fake.Clientset{},
@@ -829,11 +834,12 @@ provider,
provider,
nil,
nil,
templateNodeInfoRegistry,
)
assert.NoError(t, err)

deleteOptions := options.NodeDeleteOptions{}
p := New(&autoscalingCtx, processorstest.NewTestProcessors(&autoscalingCtx), deleteOptions, nil)
p := New(&autoscalingCtx, processors, deleteOptions, nil)

p.unneededNodes.AsList()
})
@@ -1014,16 +1020,18 @@ func TestNodesToDelete(t *testing.T) {
provider.AddNode(ng, node)
}
}
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{
autoscalingOpts := config.AutoscalingOptions{
NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
ScaleDownUnneededTime: 10 * time.Minute,
ScaleDownUnreadyTime: 0 * time.Minute,
},
}, &fake.Clientset{}, nil, provider, nil, nil)
}
processors, templateNodeInfoRegistry := processorstest.NewTestProcessors(autoscalingOpts)
autoscalingCtx, err := NewScaleTestAutoscalingContext(autoscalingOpts, &fake.Clientset{}, nil, provider, nil, nil, templateNodeInfoRegistry)
assert.NoError(t, err)
clustersnapshot.InitializeClusterSnapshotOrDie(t, autoscalingCtx.ClusterSnapshot, allNodes, nil)
deleteOptions := options.NodeDeleteOptions{}
p := New(&autoscalingCtx, processorstest.NewTestProcessors(&autoscalingCtx), deleteOptions, nil)
p := New(&autoscalingCtx, processors, deleteOptions, nil)
p.latestUpdate = time.Now()
p.scaleDownContext.ActuationStatus = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
p.unneededNodes.Update(&autoscalingCtx, allRemovables, time.Now().Add(-1*time.Hour))
2 changes: 1 addition & 1 deletion cluster-autoscaler/core/scaledown/unneeded/nodes_test.go
@@ -259,7 +259,7 @@ func TestRemovableAt(t *testing.T) {
rsLister, err := kube_util.NewTestReplicaSetLister(nil)
assert.NoError(t, err)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, rsLister, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{ScaleDownSimulationTimeout: 5 * time.Minute}, &fake.Clientset{}, registry, provider, nil, nil)
autoscalingCtx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{ScaleDownSimulationTimeout: 5 * time.Minute}, &fake.Clientset{}, registry, provider, nil, nil, nil)
assert.NoError(t, err)
expectedThreshold := 5 * time.Minute
fakeTimeGetter := &fakeScaleDownTimeGetter{
(changes to another file; file name not shown in this view)
@@ -95,10 +95,10 @@ func TestNodePoolAsyncInitialization(t *testing.T) {
listers := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil)
upcomingNodeGroup := provider.BuildNodeGroup("upcoming-ng", 0, 100, 0, false, true, "T1", nil)
options := config.AutoscalingOptions{AsyncNodeGroupsEnabled: true}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
processors, templateNodeInfoRegistry := processorstest.NewTestProcessors(options)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil, templateNodeInfoRegistry)
assert.NoError(t, err)
option := expander.Option{NodeGroup: upcomingNodeGroup, Pods: []*apiv1.Pod{pod}}
processors := processorstest.NewTestProcessors(&context)
processors.AsyncNodeGroupStateChecker = &asyncnodegroups.MockAsyncNodeGroupStateChecker{IsUpcomingNodeGroup: map[string]bool{upcomingNodeGroup.Id(): true}}
nodeInfo := framework.NewTestNodeInfo(BuildTestNode("t1", 100, 0))
executor := newScaleUpExecutor(&context, processors.ScaleStateNotifier, processors.AsyncNodeGroupStateChecker)