@@ -47,6 +47,7 @@ import (
	"sigs.k8s.io/karpenter/pkg/scheduling"
	"sigs.k8s.io/karpenter/pkg/test"
	. "sigs.k8s.io/karpenter/pkg/test/expectations"
+	"sigs.k8s.io/karpenter/pkg/test/v1alpha1"
)

var _ = Describe("Consolidation", func() {
@@ -4379,4 +4380,142 @@ var _ = Describe("Consolidation", func() {
			Expect(result.RequeueAfter).To(BeNumerically(">", 0))
		})
	})
+	Context("Reserved Capacity", func() {
+		var reservedNodeClaim *v1.NodeClaim
+		var reservedNode *corev1.Node
+		var mostExpensiveReservationID string
+
+		BeforeEach(func() {
+			mostExpensiveReservationID = fmt.Sprintf("r-%s", mostExpensiveInstance.Name)
+			mostExpensiveInstance.Requirements.Add(scheduling.NewRequirement(
+				cloudprovider.ReservationIDLabel,
+				corev1.NodeSelectorOpIn,
+				mostExpensiveReservationID,
+			))
+			mostExpensiveInstance.Requirements.Get(v1.CapacityTypeLabelKey).Insert(v1.CapacityTypeReserved)
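+			// Register a reserved offering for the most expensive instance type; it is priced at a millionth of the
+			// on-demand offering, so consolidation always has a cheaper reserved option available.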
+			mostExpensiveInstance.Offerings = append(mostExpensiveInstance.Offerings, &cloudprovider.Offering{
+				Price:               mostExpensiveOffering.Price / 1_000_000.0,
+				Available:           true,
+				ReservationCapacity: 10,
+				Requirements: scheduling.NewLabelRequirements(map[string]string{
+					v1.CapacityTypeLabelKey:     v1.CapacityTypeReserved,
+					corev1.LabelTopologyZone:    mostExpensiveOffering.Zone(),
+					v1alpha1.LabelReservationID: mostExpensiveReservationID,
+				}),
+			})
+			reservedNodeClaim, reservedNode = test.NodeClaimAndNode(v1.NodeClaim{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						v1.NodePoolLabelKey:              nodePool.Name,
+						corev1.LabelInstanceTypeStable:   mostExpensiveInstance.Name,
+						v1.CapacityTypeLabelKey:          v1.CapacityTypeReserved,
+						corev1.LabelTopologyZone:         mostExpensiveOffering.Requirements.Get(corev1.LabelTopologyZone).Any(),
+						cloudprovider.ReservationIDLabel: mostExpensiveReservationID,
+					},
+				},
+			})
+			reservedNodeClaim.StatusConditions().SetTrue(v1.ConditionTypeConsolidatable)
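+			// Reserved capacity support is gated behind the CapacityReservations feature gate, so enable it for this context.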
+			ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{CapacityReservations: lo.ToPtr(true)}}))
+		})
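+		// Each entry seeds the cluster with a node of a different capacity type and expects consolidation to replace it with the cheaper reserved offering.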
+		DescribeTable(
+			"can replace node",
+			func(initialCapacityType string) {
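+				// Pick the pre-built NodeClaim/Node pair that matches the capacity type under test.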
+				nodeClaim = lo.Switch[string, *v1.NodeClaim](initialCapacityType).
+					Case(v1.CapacityTypeOnDemand, nodeClaim).
+					Case(v1.CapacityTypeSpot, spotNodeClaim).
+					Default(reservedNodeClaim)
+				node = lo.Switch[string, *corev1.Node](initialCapacityType).
+					Case(v1.CapacityTypeOnDemand, node).
+					Case(v1.CapacityTypeSpot, spotNode).
+					Default(reservedNode)
+
+				// If the capacity type is reserved, we will need a cheaper reserved instance to consolidate into
+				var leastExpensiveReservationID string
+				if initialCapacityType == v1.CapacityTypeReserved {
+					leastExpensiveReservationID = fmt.Sprintf("r-%s", leastExpensiveInstance.Name)
+					leastExpensiveInstance.Requirements.Add(scheduling.NewRequirement(
+						cloudprovider.ReservationIDLabel,
+						corev1.NodeSelectorOpIn,
+						leastExpensiveReservationID,
+					))
+					leastExpensiveInstance.Requirements.Get(v1.CapacityTypeLabelKey).Insert(v1.CapacityTypeReserved)
+					leastExpensiveInstance.Offerings = append(leastExpensiveInstance.Offerings, &cloudprovider.Offering{
+						Price:               leastExpensiveOffering.Price / 1_000_000.0,
+						Available:           true,
+						ReservationCapacity: 10,
+						Requirements: scheduling.NewLabelRequirements(map[string]string{
+							v1.CapacityTypeLabelKey:     v1.CapacityTypeReserved,
+							corev1.LabelTopologyZone:    leastExpensiveOffering.Zone(),
+							v1alpha1.LabelReservationID: leastExpensiveReservationID,
+						}),
+					})
+				}
+
+				// create our RS so we can link a pod to it
+				rs := test.ReplicaSet()
+				ExpectApplied(ctx, env.Client, rs)
+				Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
+
+				pod := test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{
+					Labels: labels,
+					OwnerReferences: []metav1.OwnerReference{
+						{
+							APIVersion:         "apps/v1",
+							Kind:               "ReplicaSet",
+							Name:               rs.Name,
+							UID:                rs.UID,
+							Controller:         lo.ToPtr(true),
+							BlockOwnerDeletion: lo.ToPtr(true),
+						},
+					},
+				}})
+				ExpectApplied(ctx, env.Client, rs, pod, node, nodeClaim, nodePool)
+
+				// bind pods to node
+				ExpectManualBinding(ctx, env.Client, pod, node)
+
+				// inform cluster state about nodes and nodeClaims
+				ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, []*corev1.Node{node}, []*v1.NodeClaim{nodeClaim})
+
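+				// Advance the fake clock so time-based disruption checks treat the node as eligible for consolidation.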
+				fakeClock.Step(10 * time.Minute)
+
+				// consolidation won't delete the old nodeclaim until the new nodeclaim is ready
+				var wg sync.WaitGroup
+				ExpectToWait(fakeClock, &wg)
+				ExpectMakeNewNodeClaimsReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
+				ExpectSingletonReconciled(ctx, disruptionController)
+				wg.Wait()
+
+				// Process the item so that the nodes can be deleted.
+				ExpectSingletonReconciled(ctx, queue)
+
+				// Cascade any deletion of the nodeclaim to the node
+				ExpectNodeClaimsCascadeDeletion(ctx, env.Client, nodeClaim)
+
+				// should create a new nodeclaim as there is a cheaper one that can hold the pod
+				nodeClaims := ExpectNodeClaims(ctx, env.Client)
+				nodes := ExpectNodes(ctx, env.Client)
+				Expect(nodeClaims).To(HaveLen(1))
+				Expect(nodes).To(HaveLen(1))
+
+				Expect(nodeClaims[0].Name).ToNot(Equal(nodeClaim.Name))
+				// If the original capacity type was OD or spot, we should be able to consolidate into the reserved offering of the
+				// same instance type.
+				Expect(scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaims[0].Spec.Requirements...).Has(corev1.LabelInstanceTypeStable)).To(BeTrue())
+				Expect(scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaims[0].Spec.Requirements...).Get(corev1.LabelInstanceTypeStable).Has(mostExpensiveInstance.Name)).To(Equal(initialCapacityType != v1.CapacityTypeReserved))
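+				// In every case the replacement should land on a reserved offering, identified by its reservation ID.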
+				Expect(scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaims[0].Spec.Requirements...).Has(cloudprovider.ReservationIDLabel)).To(BeTrue())
+				Expect(scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaims[0].Spec.Requirements...).Get(cloudprovider.ReservationIDLabel).Any()).To(Equal(lo.Ternary(
+					initialCapacityType == v1.CapacityTypeReserved,
+					leastExpensiveReservationID,
+					mostExpensiveReservationID,
+				)))
+
+				// and delete the old one
+				ExpectNotFound(ctx, env.Client, nodeClaim, node)
+			},
+			Entry("on-demand", v1.CapacityTypeOnDemand),
+			Entry("spot", v1.CapacityTypeSpot),
+			Entry("reserved", v1.CapacityTypeReserved),
+		)
+	})
})