
Commit d428ff2

Merge pull request #184 from red-hat-storage/sync_us--main
Syncing latest changes from upstream main for ramen
2 parents 2dcfa67 + 85b3853 commit d428ff2

10 files changed, 136 insertions(+), 38 deletions(-)

controllers/drcluster_controller.go (+2, -2)

@@ -545,12 +545,12 @@ func (u *drclusterInstance) statusUpdate() error {
return fmt.Errorf("failed to update drCluster status (%s/%s)", u.object.Name, u.object.Namespace)
}

-u.log.Info(fmt.Sprintf("Updated drCluster Status %+v", u.object.Status))
+u.log.Info(fmt.Sprintf("Updated drCluster Status (%s/%s)", u.object.Name, u.object.Namespace))

return nil
}

-u.log.Info(fmt.Sprintf("Nothing to update %+v", u.object.Status))
+u.log.Info(fmt.Sprintf("Nothing to update (%s/%s)", u.object.Name, u.object.Namespace))

return nil
}

controllers/drplacementcontrol.go (+6, -6)

@@ -126,8 +126,8 @@ func (d *DRPCInstance) RunInitialDeployment() (bool, error) {
return !done, err
}

-d.log.Info(fmt.Sprintf("Using homeCluster %s for initial deployment, Placement Decision %+v",
-homeCluster, d.reconciler.getClusterDecision(d.userPlacement)))
+d.log.Info(fmt.Sprintf("Using homeCluster %s for initial deployment",
+homeCluster))

// Check if we already deployed in the homeCluster or elsewhere
deployed, clusterName := d.isDeployed(homeCluster)

@@ -804,7 +804,7 @@ func (d *DRPCInstance) RunRelocate() (bool, error) {
}

if d.getLastDRState() != rmn.Relocating && !d.validatePeerReady() {
-return !done, fmt.Errorf("clean up secondaries is pending (%+v)", d.instance.Status.Conditions)
+return !done, fmt.Errorf("clean up secondaries is pending, peer is not ready")
}

if curHomeCluster != "" && curHomeCluster != preferredCluster {

@@ -1102,7 +1102,7 @@ func (d *DRPCInstance) isVRGConditionMet(cluster string, conditionType string) b
return !ready
}

-d.log.Info(fmt.Sprintf("VRG status condition: %+v", condition))
+d.log.Info(fmt.Sprintf("VRG status condition: %s is %s", conditionType, condition.Status))

return condition.Status == metav1.ConditionTrue &&
condition.ObservedGeneration == vrg.Generation

@@ -1712,7 +1712,7 @@ func (d *DRPCInstance) EnsureCleanup(clusterToSkip string) error {
return nil
}

-d.log.Info(fmt.Sprintf("PeerReady Condition %v", condition))
+d.log.Info(fmt.Sprintf("PeerReady Condition is %s, msg: %s", condition.Status, condition.Message))

// IFF we have VolSync PVCs, then no need to clean up
homeCluster := clusterToSkip

@@ -2012,7 +2012,7 @@ func (d *DRPCInstance) ensureVRGDeleted(clusterName string) bool {
return false
}

-d.log.Info(fmt.Sprintf("VRG not deleted(%v)", vrg.ObjectMeta))
+d.log.Info(fmt.Sprintf("VRG not deleted(%s)", vrg.Name))

return false
}

controllers/drplacementcontrol_controller.go (+3, -5)

@@ -1342,8 +1342,6 @@ func getPlacementRule(ctx context.Context, k8sclient client.Client,
log.Info("User PlacementRule replica count is not set to 1, reconciliation will only" +
" schedule it to a single cluster")
}
-
-log.Info(fmt.Sprintf("PlacementRule Status is: (%+v)", usrPlRule.Status))
}

return usrPlRule, nil

@@ -1702,7 +1700,7 @@ func (r *DRPlacementControlReconciler) updateDRPCStatus(
return errorswrapper.Wrap(err, "failed to update DRPC status")
}

-log.Info(fmt.Sprintf("Updated DRPC Status %+v", drpc.Status))
+log.Info("Updated DRPC Status")

return nil
}

@@ -1721,7 +1719,7 @@ func (r *DRPlacementControlReconciler) updateResourceCondition(
vrg, err := r.MCVGetter.GetVRGFromManagedCluster(drpc.Name, vrgNamespace,
clusterName, annotations)
if err != nil {
-log.Info("Failed to get VRG from managed cluster", "errMsg", err)
+log.Info("Failed to get VRG from managed cluster", "errMsg", err.Error())

drpc.Status.ResourceConditions = rmn.VRGConditions{}
} else {

@@ -1969,7 +1967,7 @@ func (r *DRPlacementControlReconciler) createOrUpdatePlacementDecision(ctx conte
return fmt.Errorf("failed to update placementDecision status (%w)", err)
}

-r.Log.Info("Created/Updated PlacementDecision", "PlacementDecision", plDecision)
+r.Log.Info("Created/Updated PlacementDecision", "PlacementDecision", plDecision.Status.Decisions)

return nil
}

controllers/drplacementcontrolvolsync.go (+1, -1)

@@ -278,7 +278,7 @@ func (d *DRPCInstance) updateVRGSpec(clusterName string, tgtVRG *rmn.VolumeRepli
return fmt.Errorf("failed to update MW (%w)", err)
}

-d.log.Info(fmt.Sprintf("Updated VRG running in cluster %s. VRG (%+v)", clusterName, vrg))
+d.log.Info(fmt.Sprintf("Updated VRG running in cluster %s. VRG (%s)", clusterName, vrg.Name))

return nil
}

controllers/protectedvolumereplicationgrouplist_controller.go (+16, -9)

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"strings"
+"time"

"github.com/ramendr/ramen/controllers/util"
"k8s.io/apimachinery/pkg/runtime"

@@ -50,31 +51,37 @@ type ProtectedVolumeReplicationGroupListInstance struct {
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile
func (r *ProtectedVolumeReplicationGroupListReconciler) Reconcile(ctx context.Context, req ctrl.Request,
) (ctrl.Result, error) {
-log := ctrl.Log.WithName("controllers").WithName("protectedvolumereplicationgrouplist").
-WithValues("name", req.NamespacedName.Name)
-
-log.Info("protectedvolumereplicationgrouplist reconcile start")
-
+start := time.Now()
// save all the commonly used parameters in a struct
s := ProtectedVolumeReplicationGroupListInstance{
reconciler: r,
ctx: ctx,
-log: log,
+log: ctrl.Log.WithName("pvrgl").WithValues("name", req.NamespacedName.Name),
instance: &ramendrv1alpha1.ProtectedVolumeReplicationGroupList{},
}

// get ProtectedVolumeReplicationGroupListInstance and save to s.instance
-if err := r.Client.Get(ctx, req.NamespacedName, s.instance); err != nil {
+if err := r.Client.Get(s.ctx, req.NamespacedName, s.instance); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
}

+s.log = s.log.WithValues("rid", s.instance.ObjectMeta.UID, "gen", s.instance.ObjectMeta.Generation,
+"rv", s.instance.ObjectMeta.ResourceVersion)
+s.ctx = ctrl.LoggerInto(ctx, s.log)
+
+s.log.Info("reconcile start")
+
+defer func() {
+s.log.Info("reconcile end", "time spent", time.Since(start))
+}()
+
if s.instance.Status != nil {
return ctrl.Result{}, nil
}

// get target profile from spec
s3profileName := s.instance.Spec.S3ProfileName
-log.Info(fmt.Sprintf("targetProfileName=%s", s3profileName))
+s.log.Info(fmt.Sprintf("targetProfileName=%s", s3profileName))

objectStore, _, err := s.reconciler.ObjStoreGetter.ObjectStore(
s.ctx, s.reconciler.APIReader, s3profileName, NamespaceName(), s.log)

@@ -100,7 +107,7 @@ func (r *ProtectedVolumeReplicationGroupListReconciler) Reconcile(ctx context.Co
return ctrl.Result{}, fmt.Errorf("error during updateStatus: %w", err)
}

-log.Info("protectedvolumereplicationgrouplist updated successfully")
+s.log.Info("updated successfully")

return ctrl.Result{}, nil
}

controllers/util/mcv_util.go (+4, -4)

@@ -322,8 +322,8 @@ func (m ManagedClusterViewGetterImpl) getOrCreateManagedClusterView(
return nil, errorswrapper.Wrap(err, "failed to get ManagedClusterView")
}

-logger.Info(fmt.Sprintf("Creating ManagedClusterView %s with scope %+v",
-key, viewscope))
+logger.Info(fmt.Sprintf("Creating ManagedClusterView %s with scope %s",
+key, viewscope.Name))

if err := m.Create(context.TODO(), mcv); err != nil {
return nil, errorswrapper.Wrap(err, "failed to create ManagedClusterView")

@@ -332,8 +332,8 @@ func (m ManagedClusterViewGetterImpl) getOrCreateManagedClusterView(

if mcv.Spec.Scope != viewscope {
// Expected once when upgrading ramen if scope format or details have changed.
-logger.Info(fmt.Sprintf("Updating ManagedClusterView %s scope %+v to %+v",
-key, mcv.Spec.Scope, viewscope))
+logger.Info(fmt.Sprintf("Updating ManagedClusterView %s scope %s to %s",
+key, mcv.Spec.Scope.Name, viewscope.Name))

mcv.Spec.Scope = viewscope
if err := m.Update(context.TODO(), mcv); err != nil {

controllers/util/mw_util.go (-8)

@@ -118,9 +118,6 @@ func (mwu *MWUtil) CreateOrUpdateVRGManifestWork(
name, namespace, homeCluster string,
vrg rmn.VolumeReplicationGroup, annotations map[string]string,
) error {
-mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%s:%+v",
-name, namespace, homeCluster, vrg))
-
manifestWork, err := mwu.generateVRGManifestWork(name, namespace, homeCluster, vrg, annotations)
if err != nil {
return err

@@ -157,8 +154,6 @@ func (mwu *MWUtil) CreateOrUpdateMModeManifestWork(
name, cluster string,
mMode rmn.MaintenanceMode, annotations map[string]string,
) error {
-mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%+v", name, cluster, mMode))
-
manifestWork, err := mwu.generateMModeManifestWork(name, cluster, mMode, annotations)
if err != nil {
return err

@@ -238,9 +233,6 @@ func (mwu *MWUtil) CreateOrUpdateNFManifestWork(
name, namespace, homeCluster string,
nf csiaddonsv1alpha1.NetworkFence, annotations map[string]string,
) error {
-mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%s:%+v",
-name, namespace, homeCluster, nf))
-
manifestWork, err := mwu.generateNFManifestWork(name, namespace, homeCluster, nf, annotations)
if err != nil {
return err

controllers/volumereplicationgroup_controller.go (+2, -2)

@@ -1038,8 +1038,8 @@ func (v *VRGInstance) updateVRGStatus(result ctrl.Result) ctrl.Result {
if !reflect.DeepEqual(v.savedInstanceStatus, v.instance.Status) {
v.instance.Status.LastUpdateTime = metav1.Now()
if err := v.reconciler.Status().Update(v.ctx, v.instance); err != nil {
-v.log.Info(fmt.Sprintf("Failed to update VRG status (%v/%+v)",
-err, v.instance.Status))
+v.log.Info(fmt.Sprintf("Failed to update VRG status (%v/%s)",
+err, v.instance.Name))

result.Requeue = true

controllers/vrg_recipe.go (+1, -1)

@@ -119,7 +119,7 @@ func RecipeParametersExpand(recipe *recipe.Recipe, parameters map[string][]strin

bytes, err := json.Marshal(*spec)
if err != nil {
-return fmt.Errorf("recipe spec %+v json marshal error: %w", *spec, err)
+return fmt.Errorf("recipe %s json marshal error: %w", recipe.GetName(), err)
}

s1 := string(bytes)

docs/logging.md (+101, new file)

@@ -0,0 +1,101 @@
# Logging

This document outlines the logging standards and best practices for Ramen.

## General Principles

1. **Consistency**: All controllers should follow the same logging conventions.
1. **Performance Awareness**: Logging should not significantly impact
   performance. Be mindful of the logging levels and the amount of data logged.

## Logger Configuration

- Each controller should create its logger instance with a name specific to the
  controller for easy identification. For example, if the controller is named
  `ProtectedVolumeReplicationGroupList`, the logger name could be a shortened
  form of the controller like `pvrgl`.

  ```go
  logger := ctrl.Log.WithName("pvrgl")
  ```

- Loggers should be enriched with key contextual information to make the logs
  more informative. This includes:
  - The name of the custom resource.
  - The namespace of the custom resource, if applicable.
  - The UID of the custom resource.
  - The version of the custom resource.

  ```go
  logger = logger.WithValues("name", req.NamespacedName.Name, "rid", instance.ObjectMeta.UID, "version", instance.ObjectMeta.ResourceVersion)
  ```

- The context passed to functions should be augmented with the logger instance:

  ```go
  ctx = ctrl.LoggerInto(ctx, logger)
  ```

  This will be useful when we call functions that are outside of the controller
  scope and the instance.log object is not available to them.
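  For example, a function that receives only the context, such as an upload
  helper called from the reconciler, can recover the same enriched logger with
  `ctrl.LoggerFrom`. This is a minimal sketch; the helper name and its
  arguments are hypothetical:

  ```go
  // ctrl is sigs.k8s.io/controller-runtime.
  func uploadToS3(ctx context.Context, bucket string) error {
      // Recovers the logger stored by ctrl.LoggerInto, including the
      // controller name and the key-value pairs added by the reconciler.
      log := ctrl.LoggerFrom(ctx)

      log.Info("uploading", "bucket", bucket)

      return nil
  }
  ```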

## Log Messages

- Log messages should be concise yet informative. Wherever possible, use
  structured logging to provide context.

- Start and end of significant operations, like reconciliation, should be
  logged, along with the time spent:

  ```go
  start := time.Now()

  logger.Info("reconcile start")

  // Wrap the deferred call in a closure so time.Since is evaluated when the
  // function returns, not when the defer statement is executed.
  defer func() {
      logger.Info("reconcile end", "time spent", time.Since(start))
  }()
  ```

- When logging errors, include the context of the error:

  ```go
  logger.Error(err, "Error description", "key", "value")
  ```

- Use key-value pairs instead of concatenating the message and data.

  For example, avoid logging like this:

  ```go
  log.Info(fmt.Sprintf("Requeuing after %v", duration))
  ```

  This approach logs everything as one string, making it harder to parse and
  filter.

  Log additional information as key-value pairs. This method ensures that each
  piece of information is logged as a separate field, making it easier to
  search and analyze.

  Example of a well-structured log message:

  ```go
  s.log.Info("reconcile end", "time spent", time.Since(start))
  ```

  This will log in the following format, providing clear and structured context:

  ```
  2024-01-31T14:27:12.726-0500 INFO pvrgl reconcile end {"name": "protectedvrglist-0", "rid": "1e09b0fb-687b-4100-9ab1-a52ba899b37b", "rv": "322", "time spent": 6ms}
  ```
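
Putting these pieces together, a reconciler that follows the above conventions
looks roughly like the sketch below. It mirrors the
ProtectedVolumeReplicationGroupList controller changes in this commit, but the
reconciler and resource types here are hypothetical placeholders:

```go
func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    start := time.Now()

    // Named logger enriched with the request name.
    log := ctrl.Log.WithName("example").WithValues("name", req.NamespacedName.Name)

    // examplev1.Example is a placeholder custom resource type.
    instance := &examplev1.Example{}
    if err := r.Client.Get(ctx, req.NamespacedName, instance); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
    }

    // Add the resource identity once the instance has been fetched, and store
    // the logger in the context for functions outside the controller scope.
    log = log.WithValues("rid", instance.ObjectMeta.UID, "rv", instance.ObjectMeta.ResourceVersion)
    ctx = ctrl.LoggerInto(ctx, log)

    log.Info("reconcile start")

    defer func() {
        log.Info("reconcile end", "time spent", time.Since(start))
    }()

    // ... reconcile work using ctx, which now carries the logger ...

    return ctrl.Result{}, nil
}
```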

## Debugging with Logs

Structured logs make debugging easier. Here's how you can use them effectively:

### Filtering Logs for a Specific Resource

To focus on logs related to a specific resource, you can use `grep` with a
pattern that matches the resource's unique identifiers. For example:

```
grep -e 'pvrgl.*4db288b5-3f03-441c-bc44-00e356e77f62'
```

This command filters logs to show entries for the resource with UID
`4db288b5-3f03-441c-bc44-00e356e77f62`.
