
Commit afd4271

ELENAGER authored and raghavendra-talur committed
Eliminate printing to log in format '%+v'
Signed-off-by: Elena Gershkovich <[email protected]>
1 parent 6f16f7e commit afd4271

8 files changed (+15 -25 lines)
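
Every hunk below makes the same kind of change: a log or error message that dumped an entire struct with %+v now prints only the fields that identify the object. A minimal, self-contained sketch of the before and after (standard library only; the repo itself logs through logr, and every name and value here is illustrative):

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// drClusterStatus is a hypothetical stand-in for the real status struct.
type drClusterStatus struct {
	Phase      string
	Conditions []string
}

func main() {
	name, namespace := "drc-east", "ramen-system"
	status := drClusterStatus{Phase: "Available", Conditions: []string{"Fenced", "Validated"}}

	// Before: %+v serializes the whole struct; the line grows with every
	// field added to the status and changes on every reconcile.
	fmt.Printf("Updated drCluster Status %+v\n", status)

	// After (this commit's approach): print only the identifying fields.
	fmt.Printf("Updated drCluster Status (%s/%s)\n", name, namespace)

	// Structured alternative: key/value pairs (slog here for a runnable demo).
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	logger.Info("updated drCluster status", "name", name, "namespace", namespace)
}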

controllers/drcluster_controller.go (+2 -2)

@@ -545,12 +545,12 @@ func (u *drclusterInstance) statusUpdate() error {
 			return fmt.Errorf("failed to update drCluster status (%s/%s)", u.object.Name, u.object.Namespace)
 		}
 
-		u.log.Info(fmt.Sprintf("Updated drCluster Status %+v", u.object.Status))
+		u.log.Info(fmt.Sprintf("Updated drCluster Status (%s/%s)", u.object.Name, u.object.Namespace))
 
 		return nil
 	}
 
-	u.log.Info(fmt.Sprintf("Nothing to update %+v", u.object.Status))
+	u.log.Info(fmt.Sprintf("Nothing to update (%s/%s)", u.object.Name, u.object.Namespace))
 
 	return nil
 }

controllers/drplacementcontrol.go (+4 -4)

@@ -126,8 +126,8 @@ func (d *DRPCInstance) RunInitialDeployment() (bool, error) {
 		return !done, err
 	}
 
-	d.log.Info(fmt.Sprintf("Using homeCluster %s for initial deployment, Placement Decision %+v",
-		homeCluster, d.reconciler.getClusterDecision(d.userPlacement)))
+	d.log.Info(fmt.Sprintf("Using homeCluster %s for initial deployment",
+		homeCluster))
 
 	// Check if we already deployed in the homeCluster or elsewhere
 	deployed, clusterName := d.isDeployed(homeCluster)

@@ -804,7 +804,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
 	}
 
 	if d.getLastDRState() != rmn.Relocating && !d.validatePeerReady() {
-		return !done, fmt.Errorf("clean up secondaries is pending (%+v)", d.instance.Status.Conditions)
+		return !done, fmt.Errorf("clean up secondaries is pending, peer is not ready")
 	}
 
 	if curHomeCluster != "" && curHomeCluster != preferredCluster {

@@ -1102,7 +1102,7 @@ func (d *DRPCInstance) isVRGConditionMet(cluster string, conditionType string) bool {
 		return !ready
 	}
 
-	d.log.Info(fmt.Sprintf("VRG status condition: %+v", condition))
+	d.log.Info(fmt.Sprintf("VRG status condition: %s is %s", conditionType, condition.Status))
 
 	return condition.Status == metav1.ConditionTrue &&
 		condition.ObservedGeneration == vrg.Generation
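
In the last hunk, condition is a Kubernetes metav1.Condition, so logging its type and status keeps the line to two short strings while the code still checks ObservedGeneration. A runnable sketch of the same check (condition values are made up for the demo):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	conditions := []metav1.Condition{
		{Type: "DataReady", Status: metav1.ConditionTrue, ObservedGeneration: 2},
	}

	condition := meta.FindStatusCondition(conditions, "DataReady")
	if condition == nil {
		fmt.Println("condition not found")
		return
	}

	// Log the type and status instead of dumping the whole condition with %+v.
	fmt.Printf("VRG status condition: %s is %s\n", condition.Type, condition.Status)

	generation := int64(2) // stand-in for vrg.Generation
	met := condition.Status == metav1.ConditionTrue && condition.ObservedGeneration == generation
	fmt.Println("condition met:", met)
}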

controllers/drplacementcontrol_controller.go (+1 -3)

@@ -1342,8 +1342,6 @@ func getPlacementRule(ctx context.Context, k8sclient client.Client,
 			log.Info("User PlacementRule replica count is not set to 1, reconciliation will only" +
 				" schedule it to a single cluster")
 		}
-
-		log.Info(fmt.Sprintf("PlacementRule Status is: (%+v)", usrPlRule.Status))
 	}
 
 	return usrPlRule, nil

@@ -1702,7 +1700,7 @@ func (r *DRPlacementControlReconciler) updateDRPCStatus(
 		return errorswrapper.Wrap(err, "failed to update DRPC status")
 	}
 
-	log.Info(fmt.Sprintf("Updated DRPC Status %+v", drpc.Status))
+	log.Info("Updated DRPC Status")
 
 	return nil
 }

controllers/drplacementcontrolvolsync.go (+1 -1)

@@ -278,7 +278,7 @@ func (d *DRPCInstance) updateVRGSpec(clusterName string, tgtVRG *rmn.VolumeReplicationGroup
 		return fmt.Errorf("failed to update MW (%w)", err)
 	}
 
-	d.log.Info(fmt.Sprintf("Updated VRG running in cluster %s. VRG (%+v)", clusterName, vrg))
+	d.log.Info(fmt.Sprintf("Updated VRG running in cluster %s. VRG (%s)", clusterName, vrg.Name))
 
 	return nil
 }

controllers/util/mcv_util.go (+4 -4)

@@ -322,8 +322,8 @@ func (m ManagedClusterViewGetterImpl) getOrCreateManagedClusterView(
 		return nil, errorswrapper.Wrap(err, "failed to get ManagedClusterView")
 	}
 
-	logger.Info(fmt.Sprintf("Creating ManagedClusterView %s with scope %+v",
-		key, viewscope))
+	logger.Info(fmt.Sprintf("Creating ManagedClusterView %s with scope %s",
+		key, viewscope.Name))
 
 	if err := m.Create(context.TODO(), mcv); err != nil {
 		return nil, errorswrapper.Wrap(err, "failed to create ManagedClusterView")

@@ -332,8 +332,8 @@ func (m ManagedClusterViewGetterImpl) getOrCreateManagedClusterView(
 
 	if mcv.Spec.Scope != viewscope {
 		// Expected once when upgrading ramen if scope format or details have changed.
-		logger.Info(fmt.Sprintf("Updating ManagedClusterView %s scope %+v to %+v",
-			key, mcv.Spec.Scope, viewscope))
+		logger.Info(fmt.Sprintf("Updating ManagedClusterView %s scope %s to %s",
+			key, mcv.Spec.Scope.Name, viewscope.Name))
 
 		mcv.Spec.Scope = viewscope
 		if err := m.Update(context.TODO(), mcv); err != nil {
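
Worth noting in the second hunk: the scopes are still compared in full (mcv.Spec.Scope != viewscope uses Go struct equality), so no scope change is missed; only the log line is narrowed to the Name fields. A small sketch of that split between comparing and logging, with a hypothetical scope type:

package main

import "fmt"

// ViewScope is a stand-in for the ManagedClusterView scope struct
// (hypothetical fields; the real type lives in the view API package).
type ViewScope struct {
	Name      string
	Namespace string
	Resource  string
}

func main() {
	current := ViewScope{Name: "vrg-a", Namespace: "ns1", Resource: "volumereplicationgroup"}
	desired := ViewScope{Name: "vrg-a", Namespace: "ns1", Resource: "volumereplicationgroups"}

	// Struct equality compares every field, so the update is triggered by any
	// difference; only the log output is trimmed to the identifying names.
	if current != desired {
		fmt.Printf("Updating ManagedClusterView scope %s to %s\n", current.Name, desired.Name)
	}
}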

controllers/util/mw_util.go (-8)

@@ -118,9 +118,6 @@ func (mwu *MWUtil) CreateOrUpdateVRGManifestWork(
 	name, namespace, homeCluster string,
 	vrg rmn.VolumeReplicationGroup, annotations map[string]string,
 ) error {
-	mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%s:%+v",
-		name, namespace, homeCluster, vrg))
-
 	manifestWork, err := mwu.generateVRGManifestWork(name, namespace, homeCluster, vrg, annotations)
 	if err != nil {
 		return err

@@ -157,8 +154,6 @@ func (mwu *MWUtil) CreateOrUpdateMModeManifestWork(
 	name, cluster string,
 	mMode rmn.MaintenanceMode, annotations map[string]string,
 ) error {
-	mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%+v", name, cluster, mMode))
-
 	manifestWork, err := mwu.generateMModeManifestWork(name, cluster, mMode, annotations)
 	if err != nil {
 		return err

@@ -238,9 +233,6 @@ func (mwu *MWUtil) CreateOrUpdateNFManifestWork(
 	name, namespace, homeCluster string,
 	nf csiaddonsv1alpha1.NetworkFence, annotations map[string]string,
 ) error {
-	mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%s:%+v",
-		name, namespace, homeCluster, nf))
-
 	manifestWork, err := mwu.generateNFManifestWork(name, namespace, homeCluster, nf, annotations)
 	if err != nil {
 		return err
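
Unlike the other files, these three hunks delete the entry-point log lines outright instead of trimming them, since each one dumped a whole VRG, MaintenanceMode, or NetworkFence object. If a trace at this point were still wanted, one option (not what this commit does) would be demoting it to a higher verbosity level, sketched here with go-logr's stdr adapter and illustrative values:

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	stdr.SetVerbosity(1) // enable V(1) output for this demo
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))

	// V(1) lines are hidden at the default verbosity of 0, so the trace
	// stays available for debugging without cluttering normal operation.
	logger.V(1).Info("create or update manifestwork",
		"name", "vrg-mw", "namespace", "app-ns", "cluster", "dr1")
}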

controllers/volumereplicationgroup_controller.go (+2 -2)

@@ -1038,8 +1038,8 @@ func (v *VRGInstance) updateVRGStatus(result ctrl.Result) ctrl.Result {
 	if !reflect.DeepEqual(v.savedInstanceStatus, v.instance.Status) {
 		v.instance.Status.LastUpdateTime = metav1.Now()
 		if err := v.reconciler.Status().Update(v.ctx, v.instance); err != nil {
-			v.log.Info(fmt.Sprintf("Failed to update VRG status (%v/%+v)",
-				err, v.instance.Status))
+			v.log.Info(fmt.Sprintf("Failed to update VRG status (%v/%s)",
+				err, v.instance.Name))
 
 			result.Requeue = true
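
This hunk also shows why the surrounding order matters: LastUpdateTime is bumped only after reflect.DeepEqual reports a real difference, because bumping it first would make every comparison unequal and force a status write on each reconcile. A stand-alone sketch of that guard, with a hypothetical status type:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// vrgStatus is a hypothetical stand-in for the VRG status struct.
type vrgStatus struct {
	Phase          string
	LastUpdateTime time.Time
}

func main() {
	saved := vrgStatus{Phase: "Primary"}
	current := vrgStatus{Phase: "Secondary"} // this reconcile changed the phase

	// Compare before touching the timestamp: setting LastUpdateTime first
	// would make saved and current differ on every pass.
	if !reflect.DeepEqual(saved, current) {
		current.LastUpdateTime = time.Now()
		fmt.Println("status changed, issuing update; phase:", current.Phase)
	} else {
		fmt.Println("status unchanged, skipping update")
	}
}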

controllers/vrg_recipe.go (+1 -1)

@@ -119,7 +119,7 @@ func RecipeParametersExpand(recipe *recipe.Recipe, parameters map[string][]string,
 
 	bytes, err := json.Marshal(*spec)
 	if err != nil {
-		return fmt.Errorf("recipe spec %+v json marshal error: %w", *spec, err)
+		return fmt.Errorf("recipe %s json marshal error: %w", recipe.GetName(), err)
 	}
 
 	s1 := string(bytes)
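
The replacement keeps the %w verb, so callers can still unwrap the underlying marshal failure; only the %+v dump of the spec is traded for the recipe name. A runnable sketch showing that the wrapped cause survives (recipe name and payload are invented):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func marshalRecipe(name string, spec any) error {
	if _, err := json.Marshal(spec); err != nil {
		// %w wraps the cause; %s identifies the recipe without dumping the spec.
		return fmt.Errorf("recipe %s json marshal error: %w", name, err)
	}
	return nil
}

func main() {
	// A channel cannot be marshaled, so this forces a json error.
	err := marshalRecipe("backup-recipe", map[string]any{"hook": make(chan int)})

	var jsonErr *json.UnsupportedTypeError
	fmt.Println("wrapped cause preserved:", errors.As(err, &jsonErr)) // true
}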
