Skip to content

Commit d39fdb5

Browse files
Migrate logger from knative to controller-runtime (#6150)
1 parent 9c580fd commit d39fdb5

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

53 files changed

+155
-164
lines changed

cmd/controller/main.go

+1
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ import (
3131

3232
func main() {
3333
ctx, op := operator.NewOperator(coreoperator.NewOperator())
34+
3435
awsCloudProvider := cloudprovider.New(
3536
op.InstanceTypesProvider,
3637
op.InstanceProvider,

go.mod

+3-3
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ require (
2929
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
3030
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
3131
sigs.k8s.io/controller-runtime v0.18.2
32-
sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788
32+
sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4
3333
sigs.k8s.io/yaml v1.4.0
3434
)
3535

@@ -108,9 +108,9 @@ require (
108108
gopkg.in/inf.v0 v0.9.1 // indirect
109109
gopkg.in/yaml.v2 v2.4.0 // indirect
110110
gopkg.in/yaml.v3 v3.0.1 // indirect
111-
k8s.io/cloud-provider v0.30.0 // indirect
111+
k8s.io/cloud-provider v0.30.1 // indirect
112112
k8s.io/component-base v0.30.1 // indirect
113-
k8s.io/csi-translation-lib v0.30.0 // indirect
113+
k8s.io/csi-translation-lib v0.30.1 // indirect
114114
k8s.io/klog/v2 v2.120.1 // indirect
115115
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
116116
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect

go.sum

+6-6
Original file line numberDiff line numberDiff line change
@@ -740,12 +740,12 @@ k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U=
740740
k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
741741
k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q=
742742
k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc=
743-
k8s.io/cloud-provider v0.30.0 h1:hz1MXkFjsyO167sRZVchXEi2YYMQ6kolBi79nuICjzw=
744-
k8s.io/cloud-provider v0.30.0/go.mod h1:iyVcGvDfmZ7m5cliI9TTHj0VTjYDNpc/K71Gp6hukjU=
743+
k8s.io/cloud-provider v0.30.1 h1:OslHpog97zG9Kr7/vV1ki8nLKq8xTPUkN/kepCxBqKI=
744+
k8s.io/cloud-provider v0.30.1/go.mod h1:1uZp+FSskXQoeAAIU91/XCO8X/9N1U3z5usYeSLT4MI=
745745
k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ=
746746
k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI=
747-
k8s.io/csi-translation-lib v0.30.0 h1:pEe6jshNVE4od2AdgYlsAtiKP/MH+NcsBbUPA/dWA6U=
748-
k8s.io/csi-translation-lib v0.30.0/go.mod h1:5TT/awOiKEX+8CcbReVYJyddT7xqlFrp3ChE9e45MyU=
747+
k8s.io/csi-translation-lib v0.30.1 h1:fIBtNMQjyr7HFv3xGSSH9cWOQS1K1kIBmZ1zRsHuVKs=
748+
k8s.io/csi-translation-lib v0.30.1/go.mod h1:l0HrIBIxUKRvqnNWqn6AXTYgUa2mAFLT6bjo1lU+55U=
749749
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
750750
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
751751
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
@@ -761,8 +761,8 @@ sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLql
761761
sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw=
762762
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
763763
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
764-
sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788 h1:xrzVuIjd2MWfdoiIElJlJgzMvYA6MDaA1CVQUxCOhRk=
765-
sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788/go.mod h1:Ov8+tDVcF2BIPti+HL0hgoxIGy+rGIymKZAYZprl0Ww=
764+
sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4 h1:zIKW8TX593mp/rlOdCqIbgUdVRQGHzeFkgDM6+zgeE8=
765+
sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4/go.mod h1:5XYrIz9Bi7HgQyaUsx7O08ft+TJjrH+htlnPq8Sz9J8=
766766
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
767767
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
768768
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=

pkg/apis/v1beta1/suite_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,9 @@ import (
2020

2121
. "github.com/onsi/ginkgo/v2"
2222
. "github.com/onsi/gomega"
23-
. "knative.dev/pkg/logging/testing"
2423

2524
. "sigs.k8s.io/karpenter/pkg/test/expectations"
25+
. "sigs.k8s.io/karpenter/pkg/utils/testing"
2626

2727
"sigs.k8s.io/karpenter/pkg/operator/scheme"
2828
coretest "sigs.k8s.io/karpenter/pkg/test"

pkg/batcher/createfleet.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ import (
2222
"github.com/aws/aws-sdk-go/aws"
2323
"github.com/aws/aws-sdk-go/service/ec2"
2424
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
25-
"knative.dev/pkg/logging"
25+
"sigs.k8s.io/controller-runtime/pkg/log"
2626
)
2727

2828
type CreateFleetBatcher struct {
@@ -70,7 +70,7 @@ func execCreateFleetBatch(ec2api ec2iface.EC2API) BatchExecutor[ec2.CreateFleetI
7070
for _, instanceID := range reservation.InstanceIds {
7171
requestIdx++
7272
if requestIdx >= len(inputs) {
73-
logging.FromContext(ctx).Errorf("received more instances than requested, ignoring instance %s", aws.StringValue(instanceID))
73+
log.FromContext(ctx).Error(fmt.Errorf("received more instances than requested, ignoring instance %s", aws.StringValue(instanceID)), "received error while batching")
7474
continue
7575
}
7676
results = append(results, Result[ec2.CreateFleetOutput]{

pkg/batcher/describeinstances.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ import (
2626
"github.com/mitchellh/hashstructure/v2"
2727
"github.com/samber/lo"
2828
"k8s.io/apimachinery/pkg/util/sets"
29-
"knative.dev/pkg/logging"
29+
"sigs.k8s.io/controller-runtime/pkg/log"
3030
)
3131

3232
type DescribeInstancesBatcher struct {
@@ -56,7 +56,7 @@ func (b *DescribeInstancesBatcher) DescribeInstances(ctx context.Context, descri
5656
func FilterHasher(ctx context.Context, input *ec2.DescribeInstancesInput) uint64 {
5757
hash, err := hashstructure.Hash(input.Filters, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true})
5858
if err != nil {
59-
logging.FromContext(ctx).Errorf("error hashing")
59+
log.FromContext(ctx).Error(err, "failed hashing input filters")
6060
}
6161
return hash
6262
}

pkg/batcher/suite_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ import (
3030

3131
. "github.com/onsi/ginkgo/v2"
3232
. "github.com/onsi/gomega"
33-
. "knative.dev/pkg/logging/testing"
33+
. "sigs.k8s.io/karpenter/pkg/utils/testing"
3434
)
3535

3636
var fakeEC2API *fake.EC2API

pkg/batcher/terminateinstances.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ import (
2525
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
2626
"github.com/samber/lo"
2727
"k8s.io/apimachinery/pkg/util/sets"
28-
"knative.dev/pkg/logging"
28+
"sigs.k8s.io/controller-runtime/pkg/log"
2929
)
3030

3131
type TerminateInstancesBatcher struct {
@@ -68,7 +68,7 @@ func execTerminateInstancesBatch(ec2api ec2iface.EC2API) BatchExecutor[ec2.Termi
6868
// We don't care about the error here since we'll break up the batch upon any sort of failure
6969
output, err := ec2api.TerminateInstancesWithContext(ctx, firstInput)
7070
if err != nil {
71-
logging.FromContext(ctx).Errorf("terminating instances, %s", err)
71+
log.FromContext(ctx).Error(err, "failed terminating instances")
7272
}
7373

7474
if output == nil {

pkg/cache/unavailableofferings.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ import (
2222
"github.com/aws/aws-sdk-go/aws"
2323
"github.com/aws/aws-sdk-go/service/ec2"
2424
"github.com/patrickmn/go-cache"
25-
"knative.dev/pkg/logging"
25+
"sigs.k8s.io/controller-runtime/pkg/log"
2626
)
2727

2828
// UnavailableOfferings stores any offerings that return ICE (insufficient capacity errors) when
@@ -54,12 +54,12 @@ func (u *UnavailableOfferings) IsUnavailable(instanceType, zone, capacityType st
5454
// MarkUnavailable communicates recently observed temporary capacity shortages in the provided offerings
5555
func (u *UnavailableOfferings) MarkUnavailable(ctx context.Context, unavailableReason, instanceType, zone, capacityType string) {
5656
// even if the key is already in the cache, we still need to call Set to extend the cached entry's TTL
57-
logging.FromContext(ctx).With(
57+
log.FromContext(ctx).WithValues(
5858
"reason", unavailableReason,
5959
"instance-type", instanceType,
6060
"zone", zone,
6161
"capacity-type", capacityType,
62-
"ttl", UnavailableOfferingsTTL).Debugf("removing offering from offerings")
62+
"ttl", UnavailableOfferingsTTL).V(1).Info("removing offering from offerings")
6363
u.cache.SetDefault(u.key(instanceType, zone, capacityType), struct{}{})
6464
atomic.AddUint64(&u.SeqNum, 1)
6565
}

pkg/cloudprovider/cloudprovider.go

+3-5
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ import (
2626
"k8s.io/apimachinery/pkg/api/resource"
2727
"k8s.io/apimachinery/pkg/runtime/schema"
2828

29+
"sigs.k8s.io/controller-runtime/pkg/log"
2930
corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
3031
"sigs.k8s.io/karpenter/pkg/events"
3132
"sigs.k8s.io/karpenter/pkg/scheduling"
@@ -39,7 +40,6 @@ import (
3940
v1 "k8s.io/api/core/v1"
4041
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4142
"k8s.io/apimachinery/pkg/types"
42-
"knative.dev/pkg/logging"
4343
"sigs.k8s.io/controller-runtime/pkg/client"
4444

4545
cloudproviderevents "github.com/aws/karpenter-provider-aws/pkg/cloudprovider/events"
@@ -135,7 +135,7 @@ func (c *CloudProvider) Get(ctx context.Context, providerID string) (*corev1beta
135135
if err != nil {
136136
return nil, fmt.Errorf("getting instance ID, %w", err)
137137
}
138-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("id", id))
138+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("id", id))
139139
instance, err := c.instanceProvider.Get(ctx, id)
140140
if err != nil {
141141
return nil, fmt.Errorf("getting instance, %w", err)
@@ -172,13 +172,11 @@ func (c *CloudProvider) GetInstanceTypes(ctx context.Context, nodePool *corev1be
172172
}
173173

174174
func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) error {
175-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("nodeclaim", nodeClaim.Name))
176-
177175
id, err := utils.ParseInstanceID(nodeClaim.Status.ProviderID)
178176
if err != nil {
179177
return fmt.Errorf("getting instance ID, %w", err)
180178
}
181-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("id", id))
179+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("id", id))
182180
return c.instanceProvider.Delete(ctx, id)
183181
}
184182

pkg/cloudprovider/suite_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ import (
5555

5656
. "github.com/onsi/ginkgo/v2"
5757
. "github.com/onsi/gomega"
58-
. "knative.dev/pkg/logging/testing"
5958
. "sigs.k8s.io/karpenter/pkg/test/expectations"
59+
. "sigs.k8s.io/karpenter/pkg/utils/testing"
6060
)
6161

6262
var ctx context.Context

pkg/controllers/interruption/controller.go

+8-8
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,8 @@ import (
2626
v1 "k8s.io/api/core/v1"
2727
"k8s.io/client-go/util/workqueue"
2828
"k8s.io/utils/clock"
29-
"knative.dev/pkg/logging"
3029
"sigs.k8s.io/controller-runtime/pkg/client"
30+
"sigs.k8s.io/controller-runtime/pkg/log"
3131
"sigs.k8s.io/controller-runtime/pkg/manager"
3232
"sigs.k8s.io/controller-runtime/pkg/reconcile"
3333
"sigs.k8s.io/karpenter/pkg/metrics"
@@ -81,9 +81,9 @@ func NewController(kubeClient client.Client, clk clock.Clock, recorder events.Re
8181
}
8282

8383
func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
84-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("queue", c.sqsProvider.Name()))
84+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("queue", c.sqsProvider.Name()))
8585
if c.cm.HasChanged(c.sqsProvider.Name(), nil) {
86-
logging.FromContext(ctx).Debugf("watching interruption queue")
86+
log.FromContext(ctx).V(1).Info("watching interruption queue")
8787
}
8888
sqsMessages, err := c.sqsProvider.GetSQSMessages(ctx)
8989
if err != nil {
@@ -105,7 +105,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
105105
msg, e := c.parseMessage(sqsMessages[i])
106106
if e != nil {
107107
// If we fail to parse, then we should delete the message but still log the error
108-
logging.FromContext(ctx).Errorf("parsing message, %v", e)
108+
log.FromContext(ctx).Error(e, "failed parsing interruption message")
109109
errs[i] = c.deleteMessage(ctx, sqsMessages[i])
110110
return
111111
}
@@ -144,7 +144,7 @@ func (c *Controller) parseMessage(raw *sqsapi.Message) (messages.Message, error)
144144
func (c *Controller) handleMessage(ctx context.Context, nodeClaimInstanceIDMap map[string]*v1beta1.NodeClaim,
145145
nodeInstanceIDMap map[string]*v1.Node, msg messages.Message) (err error) {
146146

147-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("messageKind", msg.Kind()))
147+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("messageKind", msg.Kind()))
148148
receivedMessages.WithLabelValues(string(msg.Kind())).Inc()
149149

150150
if msg.Kind() == messages.NoOpKind {
@@ -179,9 +179,9 @@ func (c *Controller) deleteMessage(ctx context.Context, msg *sqsapi.Message) err
179179
// handleNodeClaim retrieves the action for the message and then performs the appropriate action against the node
180180
func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, nodeClaim *v1beta1.NodeClaim, node *v1.Node) error {
181181
action := actionForMessage(msg)
182-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("nodeclaim", nodeClaim.Name, "action", string(action)))
182+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("nodeclaim", nodeClaim.Name, "action", string(action)))
183183
if node != nil {
184-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name))
184+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("node", node.Name))
185185
}
186186

187187
// Record metric and event for this action
@@ -215,7 +215,7 @@ func (c *Controller) deleteNodeClaim(ctx context.Context, nodeClaim *v1beta1.Nod
215215
if err := c.kubeClient.Delete(ctx, nodeClaim); err != nil {
216216
return client.IgnoreNotFound(fmt.Errorf("deleting the node on interruption message, %w", err))
217217
}
218-
logging.FromContext(ctx).Infof("initiating delete from interruption message")
218+
log.FromContext(ctx).Info("initiating delete from interruption message")
219219
c.recorder.Publish(interruptionevents.TerminatingOnInterruption(node, nodeClaim)...)
220220
metrics.NodeClaimsTerminatedCounter.With(prometheus.Labels{
221221
metrics.ReasonLabel: terminationReasonLabel,

pkg/controllers/interruption/interruption_benchmark_test.go

+10-9
Original file line numberDiff line numberDiff line change
@@ -32,16 +32,17 @@ import (
3232
"github.com/aws/aws-sdk-go/aws/session"
3333
servicesqs "github.com/aws/aws-sdk-go/service/sqs"
3434
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
35+
"github.com/go-logr/zapr"
3536
"github.com/samber/lo"
3637
"go.uber.org/multierr"
3738
"go.uber.org/zap"
3839
v1 "k8s.io/api/core/v1"
3940
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4041
"k8s.io/client-go/util/workqueue"
4142
clock "k8s.io/utils/clock/testing"
42-
"knative.dev/pkg/logging"
4343
controllerruntime "sigs.k8s.io/controller-runtime"
4444
"sigs.k8s.io/controller-runtime/pkg/client"
45+
"sigs.k8s.io/controller-runtime/pkg/log"
4546
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
4647

4748
"sigs.k8s.io/karpenter/pkg/operator/scheme"
@@ -78,7 +79,7 @@ func BenchmarkNotification100(b *testing.B) {
7879

7980
//nolint:gocyclo
8081
func benchmarkNotificationController(b *testing.B, messageCount int) {
81-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("message-count", messageCount))
82+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("message-count", messageCount))
8283
fakeClock = &clock.FakeClock{}
8384
ctx = coreoptions.ToContext(ctx, coretest.Options())
8485
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{
@@ -118,20 +119,20 @@ func benchmarkNotificationController(b *testing.B, messageCount int) {
118119
interruptionController := interruption.NewController(env.Client, fakeClock, recorder, providers.sqsProvider, unavailableOfferingsCache)
119120

120121
messages, nodes := makeDiverseMessagesAndNodes(messageCount)
121-
logging.FromContext(ctx).Infof("provisioning nodes")
122+
log.FromContext(ctx).Info("provisioning nodes")
122123
if err := provisionNodes(ctx, env.Client, nodes); err != nil {
123124
b.Fatalf("provisioning nodes, %v", err)
124125
}
125-
logging.FromContext(ctx).Infof("completed provisioning nodes")
126+
log.FromContext(ctx).Info("completed provisioning nodes")
126127

127-
logging.FromContext(ctx).Infof("provisioning messages into the SQS Queue")
128+
log.FromContext(ctx).Info("provisioning messages into the SQS Queue")
128129
if err := providers.provisionMessages(ctx, messages...); err != nil {
129130
b.Fatalf("provisioning messages, %v", err)
130131
}
131-
logging.FromContext(ctx).Infof("completed provisioning messages into the SQS Queue")
132+
log.FromContext(ctx).Info("completed provisioning messages into the SQS Queue")
132133

133134
m, err := controllerruntime.NewManager(env.Config, controllerruntime.Options{
134-
BaseContext: func() context.Context { return logging.WithLogger(ctx, zap.NewNop().Sugar()) },
135+
BaseContext: func() context.Context { return log.IntoContext(ctx, zapr.NewLogger(zap.NewNop())) },
135136
})
136137
if err != nil {
137138
b.Fatalf("creating manager, %v", err)
@@ -146,7 +147,7 @@ func benchmarkNotificationController(b *testing.B, messageCount int) {
146147
start := time.Now()
147148
managerErr := make(chan error)
148149
go func() {
149-
logging.FromContext(ctx).Infof("starting controller manager")
150+
log.FromContext(ctx).Info("starting controller manager")
150151
managerErr <- m.Start(ctx)
151152
}()
152153

@@ -225,7 +226,7 @@ func (p *providerSet) monitorMessagesProcessed(ctx context.Context, eventRecorde
225226
eventRecorder.Calls(events.Unhealthy(coretest.Node(), coretest.NodeClaim())[0].Reason) +
226227
eventRecorder.Calls(events.RebalanceRecommendation(coretest.Node(), coretest.NodeClaim())[0].Reason) +
227228
eventRecorder.Calls(events.SpotInterrupted(coretest.Node(), coretest.NodeClaim())[0].Reason)
228-
logging.FromContext(ctx).With("processed-message-count", totalProcessed).Infof("processed messages from the queue")
229+
log.FromContext(ctx).WithValues("processed-message-count", totalProcessed).Info("processed messages from the queue")
229230
time.Sleep(time.Second)
230231
}
231232
close(done)

pkg/controllers/interruption/suite_test.go

+1-2
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@ import (
3131
"k8s.io/apimachinery/pkg/util/uuid"
3232
"k8s.io/client-go/tools/record"
3333
clock "k8s.io/utils/clock/testing"
34-
_ "knative.dev/pkg/system/testing"
3534
"sigs.k8s.io/controller-runtime/pkg/client"
3635

3736
corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
@@ -53,8 +52,8 @@ import (
5352

5453
. "github.com/onsi/ginkgo/v2"
5554
. "github.com/onsi/gomega"
56-
. "knative.dev/pkg/logging/testing"
5755
. "sigs.k8s.io/karpenter/pkg/test/expectations"
56+
. "sigs.k8s.io/karpenter/pkg/utils/testing"
5857
)
5958

6059
const (

pkg/controllers/nodeclaim/garbagecollection/controller.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@ import (
2424
v1 "k8s.io/api/core/v1"
2525
"k8s.io/apimachinery/pkg/util/sets"
2626
"k8s.io/client-go/util/workqueue"
27-
"knative.dev/pkg/logging"
2827
"sigs.k8s.io/controller-runtime/pkg/client"
28+
"sigs.k8s.io/controller-runtime/pkg/log"
2929
"sigs.k8s.io/controller-runtime/pkg/manager"
3030
"sigs.k8s.io/controller-runtime/pkg/reconcile"
3131
"sigs.k8s.io/karpenter/pkg/cloudprovider"
@@ -85,11 +85,11 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
8585
}
8686

8787
func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.NodeClaim, nodeList *v1.NodeList) error {
88-
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Status.ProviderID))
88+
ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID))
8989
if err := c.cloudProvider.Delete(ctx, nodeClaim); err != nil {
9090
return cloudprovider.IgnoreNodeClaimNotFoundError(err)
9191
}
92-
logging.FromContext(ctx).Debugf("garbage collected cloudprovider instance")
92+
log.FromContext(ctx).V(1).Info("garbage collected cloudprovider instance")
9393

9494
// Go ahead and cleanup the node if we know that it exists to make scheduling go quicker
9595
if node, ok := lo.Find(nodeList.Items, func(n v1.Node) bool {
@@ -98,7 +98,7 @@ func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.Node
9898
if err := c.kubeClient.Delete(ctx, &node); err != nil {
9999
return client.IgnoreNotFound(err)
100100
}
101-
logging.FromContext(ctx).With("node", node.Name).Debugf("garbage collected node")
101+
log.FromContext(ctx).WithValues("node", node.Name).V(1).Info("garbage collected node")
102102
}
103103
return nil
104104
}

pkg/controllers/nodeclaim/garbagecollection/suite_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,8 @@ import (
4444

4545
. "github.com/onsi/ginkgo/v2"
4646
. "github.com/onsi/gomega"
47-
. "knative.dev/pkg/logging/testing"
4847
. "sigs.k8s.io/karpenter/pkg/test/expectations"
48+
. "sigs.k8s.io/karpenter/pkg/utils/testing"
4949
)
5050

5151
var ctx context.Context

0 commit comments

Comments (0)