@@ -31,7 +31,6 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/clock"
@@ -236,8 +235,8 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*corev1.Pod, stat
 	nodepoolutils.OrderByWeight(nodePools)

 	instanceTypes := map[string][]*cloudprovider.InstanceType{}
-	domains := map[string]sets.Set[string]{}
 	for _, np := range nodePools {
+		// Get instance type options
 		its, err := p.cloudProvider.GetInstanceTypes(ctx, np)
 		if err != nil {
 			log.FromContext(ctx).WithValues("NodePool", klog.KRef("", np.Name)).Error(err, "skipping, unable to resolve instance types")
@@ -247,49 +246,14 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*corev1.Pod, stat
 			log.FromContext(ctx).WithValues("NodePool", klog.KRef("", np.Name)).Info("skipping, no resolved instance types found")
 			continue
 		}
-
 		instanceTypes[np.Name] = its
-
-		// Construct Topology Domains
-		for _, it := range its {
-			// We need to intersect the instance type requirements with the current nodePool requirements. This
-			// ensures that something like zones from an instance type don't expand the universe of valid domains.
-			requirements := scheduling.NewNodeSelectorRequirementsWithMinValues(np.Spec.Template.Spec.Requirements...)
-			requirements.Add(scheduling.NewLabelRequirements(np.Spec.Template.Labels).Values()...)
-			requirements.Add(it.Requirements.Values()...)
-
-			for key, requirement := range requirements {
-				// This code used to execute a Union between domains[key] and requirement.Values().
-				// The downside of this is that Union is immutable and takes a copy of the set it is executed upon.
-				// This resulted in a lot of memory pressure on the heap and poor performance
-				// https://github.com/aws/karpenter/issues/3565
-				if domains[key] == nil {
-					domains[key] = sets.New(requirement.Values()...)
-				} else {
-					domains[key].Insert(requirement.Values()...)
-				}
-			}
-		}
-
-		requirements := scheduling.NewNodeSelectorRequirementsWithMinValues(np.Spec.Template.Spec.Requirements...)
-		requirements.Add(scheduling.NewLabelRequirements(np.Spec.Template.Labels).Values()...)
-		for key, requirement := range requirements {
-			if requirement.Operator() == corev1.NodeSelectorOpIn {
-				// The following is a performance optimisation, for the explanation see the comment above
-				if domains[key] == nil {
-					domains[key] = sets.New(requirement.Values()...)
-				} else {
-					domains[key].Insert(requirement.Values()...)
-				}
-			}
-		}
 	}

 	// inject topology constraints
 	pods = p.injectVolumeTopologyRequirements(ctx, pods)

 	// Calculate cluster topology
-	topology, err := scheduler.NewTopology(ctx, p.kubeClient, p.cluster, domains, pods)
+	topology, err := scheduler.NewTopology(ctx, p.kubeClient, p.cluster, stateNodes, nodePools, instanceTypes, pods)
 	if err != nil {
 		return nil, fmt.Errorf("tracking topology counts, %w", err)
 	}
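Note on the removed domain-construction loop: its comments reference https://github.com/aws/karpenter/issues/3565, where accumulating topology domains with sets.Union caused heap pressure because Union is immutable and copies the set it is called on, so the code switched to an in-place Insert. Below is a minimal standalone sketch of that accumulation pattern; the helper name and the example zone values are illustrative only and are not part of this diff.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// addDomainValues accumulates values into domains[key] in place.
// sets.Set.Insert mutates the existing set, which avoids the copy that the
// immutable sets.Union would make of the receiver on every call.
func addDomainValues(domains map[string]sets.Set[string], key string, values ...string) {
	if domains[key] == nil {
		domains[key] = sets.New(values...)
	} else {
		domains[key].Insert(values...)
	}
}

func main() {
	domains := map[string]sets.Set[string]{}
	addDomainValues(domains, "topology.kubernetes.io/zone", "us-east-1a", "us-east-1b")
	addDomainValues(domains, "topology.kubernetes.io/zone", "us-east-1b", "us-east-1c")
	fmt.Println(sets.List(domains["topology.kubernetes.io/zone"])) // [us-east-1a us-east-1b us-east-1c]
}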