Skip to content

Commit e4297b4

Browse files
authored
op-batcher: improve computeSyncActions() logging (#14563)
* improve computeSyncActions logging
* fix up test and make sure all cases run (!)
* use a more friendly format for the structured logger
1 parent 5523d7a commit e4297b4

File tree

2 files changed

+40
-20
lines changed

2 files changed

+40
-20
lines changed

op-batcher/batcher/sync_actions.go

+36-18
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,11 @@ type channelStatuser interface {
1717
}
1818

1919
type inclusiveBlockRange struct{ start, end uint64 }
20+
21+
func (r *inclusiveBlockRange) TerminalString() string {
22+
return fmt.Sprintf("[%d, %d]", r.start, r.end)
23+
}
24+
2025
type syncActions struct {
2126
clearState *eth.BlockID
2227
blocksToPrune int
@@ -25,9 +30,17 @@ type syncActions struct {
2530
// NOTE this range is inclusive on both ends, which is a change to previous behaviour.
2631
}
2732

28-
func (s syncActions) String() string {
33+
func (s syncActions) TerminalString() string {
34+
cs := "nil"
35+
if s.clearState != nil {
36+
cs = s.clearState.TerminalString()
37+
}
38+
btl := "nil"
39+
if s.blocksToLoad != nil {
40+
btl = s.blocksToLoad.TerminalString()
41+
}
2942
return fmt.Sprintf(
30-
"SyncActions{blocksToPrune: %d, channelsToPrune: %d, clearState: %v, blocksToLoad: %v}", s.blocksToPrune, s.channelsToPrune, s.clearState, s.blocksToLoad)
43+
"SyncActions{blocksToPrune: %d, channelsToPrune: %d, clearState: %v, blocksToLoad: %v}", s.blocksToPrune, s.channelsToPrune, cs, btl)
3144
}
3245

3346
// computeSyncActions determines the actions that should be taken based on the inputs provided. The inputs are the current
@@ -36,15 +49,22 @@ func (s syncActions) String() string {
3649
// range to load into the local state, and whether to clear the state entirely. Returns a boolean indicating if the sequencer is out of sync.
3750
func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCurrentL1 eth.L1BlockRef, blocks queue.Queue[*types.Block], channels []T, l log.Logger) (syncActions, bool) {
3851

52+
m := l.With(
53+
"syncStatus.headL1", newSyncStatus.HeadL1,
54+
"syncStatus.currentL1", newSyncStatus.CurrentL1,
55+
"syncStatus.localSafeL2", newSyncStatus.LocalSafeL2,
56+
"syncStatus.unsafeL2", newSyncStatus.UnsafeL2,
57+
)
58+
3959
// PART 1: Initial checks on the sync status
4060
if newSyncStatus.HeadL1 == (eth.L1BlockRef{}) {
41-
l.Warn("empty sync status")
61+
m.Warn("empty sync status")
4262
return syncActions{}, true
4363
}
4464

4565
if newSyncStatus.CurrentL1.Number < prevCurrentL1.Number {
4666
// This can happen when the sequencer restarts
47-
l.Warn("sequencer currentL1 reversed")
67+
m.Warn("sequencer currentL1 reversed", "prevCurrentL1", prevCurrentL1)
4868
return syncActions{}, true
4969
}
5070

@@ -60,7 +80,7 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur
6080
s := syncActions{
6181
blocksToLoad: allUnsafeBlocks,
6282
}
63-
l.Info("no blocks in state", "syncActions", s)
83+
m.Info("no blocks in state", "syncActions", s)
6484
return s, false
6585
}
6686

@@ -77,10 +97,9 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur
7797
nextSafeBlockNum := newSyncStatus.LocalSafeL2.Number + 1
7898

7999
if nextSafeBlockNum < oldestBlockInStateNum {
80-
l.Warn("next safe block is below oldest block in state",
100+
m.Warn("next safe block is below oldest block in state",
81101
"syncActions", startAfresh,
82-
"oldestBlockInState", oldestBlockInState,
83-
"safeL2", newSyncStatus.LocalSafeL2)
102+
"oldestBlockInStateNum", oldestBlockInStateNum)
84103
return startAfresh, false
85104
}
86105

@@ -94,19 +113,17 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur
94113
// This could happen if the batcher restarted.
95114
// The sequencer may have derived the safe chain
96115
// from channels sent by a previous batcher instance.
97-
l.Warn("safe head above newest block in state, clearing channel manager state",
116+
m.Warn("safe head above newest block in state, clearing channel manager state",
98117
"syncActions", startAfresh,
99-
"safeL2", newSyncStatus.LocalSafeL2,
100118
"newestBlockInState", eth.ToBlockID(newestBlockInState),
101119
)
102120
return startAfresh, false
103121
}
104122

105123
if numBlocksToDequeue > 0 && blocks[numBlocksToDequeue-1].Hash() != newSyncStatus.LocalSafeL2.Hash {
106-
l.Warn("safe chain reorg, clearing channel manager state",
124+
m.Warn("safe chain reorg, clearing channel manager state",
107125
"syncActions", startAfresh,
108-
"existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]),
109-
"safeL2", newSyncStatus.LocalSafeL2)
126+
"existingBlock", eth.ToBlockID(blocks[numBlocksToDequeue-1]))
110127
return startAfresh, false
111128
}
112129

@@ -120,10 +137,9 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur
120137
// for a fully submitted channel. This indicates
121138
// that the derivation pipeline may have stalled
122139
// e.g. because of Holocene strict ordering rules.
123-
l.Warn("sequencer did not make expected progress",
140+
m.Warn("sequencer did not make expected progress",
124141
"syncActions", startAfresh,
125-
"existingBlock", ch.LatestL2(),
126-
"safeL2", newSyncStatus.LocalSafeL2)
142+
"existingBlock", ch.LatestL2())
127143
return startAfresh, false
128144
}
129145
}
@@ -144,9 +160,11 @@ func computeSyncActions[T channelStatuser](newSyncStatus eth.SyncStatus, prevCur
144160
allUnsafeBlocksAboveState = &inclusiveBlockRange{newestBlockInStateNum + 1, newSyncStatus.UnsafeL2.Number}
145161
}
146162

147-
return syncActions{
163+
a := syncActions{
148164
blocksToPrune: int(numBlocksToDequeue),
149165
channelsToPrune: numChannelsToPrune,
150166
blocksToLoad: allUnsafeBlocksAboveState,
151-
}, false
167+
}
168+
m.Debug("computed sync actions", "syncActions", a)
169+
return a, false
152170
}

op-batcher/batcher/sync_actions_test.go

+4-2
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) {
5555
timedOut: false,
5656
}
5757

58-
happyCaseLogs := []string{} // in the happy case we expect no logs
58+
happyCaseLogs := []string{"computed sync actions"}
5959

6060
type TestCase struct {
6161
name string
@@ -196,6 +196,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) {
196196
expected: syncActions{
197197
blocksToLoad: &inclusiveBlockRange{104, 109},
198198
},
199+
expectedLogs: happyCaseLogs,
199200
},
200201
{name: "no blocks",
201202
// This happens when the batcher is starting up for the first time
@@ -229,6 +230,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) {
229230
channelsToPrune: 1,
230231
blocksToLoad: &inclusiveBlockRange{104, 109},
231232
},
233+
expectedLogs: happyCaseLogs,
232234
},
233235
{name: "happy path + multiple channels",
234236
newSyncStatus: eth.SyncStatus{
@@ -277,7 +279,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) {
277279
},
278280
}
279281

280-
for _, tc := range testCases[len(testCases)-1:] {
282+
for _, tc := range testCases {
281283

282284
t.Run(tc.name, func(t *testing.T) {
283285
l, h := testlog.CaptureLogger(t, log.LevelDebug)

0 commit comments

Comments (0)