2 changes: 1 addition & 1 deletion arbnode/inbox_test.go
@@ -157,7 +157,7 @@ func NewTransactionStreamerForTest(t *testing.T, ctx context.Context, ownerAddre
Fail(t, err)
}
execSeq := &execClientWrapper{execEngine, t}
- inbox, err := NewTransactionStreamer(ctx, arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher, &DefaultSnapSyncConfig)
+ inbox, err := NewTransactionStreamer(ctx, arbDb, bc.Config(), execSeq, nil, nil, make(chan error, 1), transactionStreamerConfigFetcher, &DefaultSnapSyncConfig)
if err != nil {
Fail(t, err)
}
20 changes: 15 additions & 5 deletions arbnode/node.go
@@ -56,6 +56,7 @@ import (
"github.com/offchainlabs/nitro/util/redisutil"
"github.com/offchainlabs/nitro/util/rpcclient"
"github.com/offchainlabs/nitro/util/signature"
"github.com/offchainlabs/nitro/util/testhelpers/env"
"github.com/offchainlabs/nitro/wsbroadcastserver"
)

@@ -210,6 +211,10 @@ func ConfigDefaultL1NonSequencerTest() *Config {
config.Staker.Enable = false
config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}}
config.Bold.MinimumGapToParentAssertion = 0
+ // Disable broadcast during sync only for path tests as it causes test timeouts
+ if env.GetTestStateScheme() == rawdb.PathScheme {
+ config.TransactionStreamer.DisableBroadcastDuringSync = false
+ }

return &config
}
@@ -228,6 +233,10 @@ func ConfigDefaultL2Test() *Config {
config.Staker.Enable = false
config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}}
config.TransactionStreamer = DefaultTransactionStreamerConfig
+ // Disable broadcast during sync only for path tests as it causes test timeouts
+ if env.GetTestStateScheme() == rawdb.PathScheme {
+ config.TransactionStreamer.DisableBroadcastDuringSync = false
+ }
config.Bold.MinimumGapToParentAssertion = 0

return &config
@@ -828,11 +837,12 @@ func getTransactionStreamer(
l2Config *params.ChainConfig,
exec execution.ExecutionClient,
broadcastServer *broadcaster.Broadcaster,
+ syncMonitor *SyncMonitor,
configFetcher ConfigFetcher,
fatalErrChan chan error,
) (*TransactionStreamer, error) {
transactionStreamerConfigFetcher := func() *TransactionStreamerConfig { return &configFetcher.Get().TransactionStreamer }
- txStreamer, err := NewTransactionStreamer(ctx, arbDb, l2Config, exec, broadcastServer, fatalErrChan, transactionStreamerConfigFetcher, &configFetcher.Get().SnapSyncTest)
+ txStreamer, err := NewTransactionStreamer(ctx, arbDb, l2Config, exec, broadcastServer, syncMonitor, fatalErrChan, transactionStreamerConfigFetcher, &configFetcher.Get().SnapSyncTest)
@diegoximenes (Contributor) commented on Aug 22, 2025:

SyncMonitor depends on TransactionStreamer today, and this creates a circular dependency between them 😬.

This indicates to me that they could be better placed in a single component.
How about moving SyncMonitor logic to TransactionStreamer and then dropping SyncMonitor?

I created this Linear task in order to move logic from SyncMonitor to ConsensusExecutionSyncer, but it is OK to move it to TransactionStreamer 🙂.
ConsensusExecutionSyncer already depends on TransactionStreamer, and can call it for what it needs.
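
For readers less familiar with the components involved, here is a minimal sketch of the cycle being discussed; the struct definitions below are deliberately stripped down and hypothetical, not the real arbnode types:

```go
package main

// After this PR the streamer is constructed with a reference to the sync
// monitor, while SyncMonitor.Initialize previously received a reference to
// the streamer, so each side ends up holding a pointer to the other.

type TransactionStreamer struct {
	syncMonitor *SyncMonitor // new direction: streamer pushes counts to the monitor
}

type SyncMonitor struct {
	txStreamer *TransactionStreamer // old direction: monitor pulls counts from the streamer
}

func main() {
	monitor := &SyncMonitor{}
	streamer := &TransactionStreamer{syncMonitor: monitor}
	monitor.txStreamer = streamer // completes the TransactionStreamer <-> SyncMonitor cycle
}
```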

Member Author replied:

I made a PR to remove the circular dependency: #3538. If you like the approach then I'll rebase this PR onto that.

Member Author replied:

I am re-reading your comment now and I think I got a bit carried away when I saw NIT-3649: the work I did in #3538 isn't really related to the circular dependency you're talking about here; rather, it's related to breaking the consensus<->execution circular dependency.

I've spent some time thinking about how to break the circular dependencies between TransactionStreamer and arbnode.SyncMonitor, and between TransactionStreamer and BroadcastSyncChecker, and I can't see a good way to do it without moving all the logic into TransactionStreamer, which I don't really like because SyncMonitor already has a fairly clear responsibility. I am thinking about alternatives now...

Member Author replied:

I addressed this in 65c6435

Remove TransactionStreamer<->SyncMonitor circ dep

TransactionStreamer now pushes updates of the currentMessageCount and
feedPendingMessageCount to the SyncMonitor every time it updates either of
these in its own state. This means SyncMonitor no longer needs to hold a
reference to TransactionStreamer.

The logic for checking whether the TransactionStreamer should broadcast to
the feed has been moved into TransactionStreamer, with the core decision
logic split out for testing.
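
To make the data flow concrete, here is a minimal, self-contained sketch of the push-based update described above. Only SyncMonitor.UpdateMessageCount mirrors a method actually added in this PR; the streamer-side names (setMessageCount, setFeedPendingCount, notifySyncMonitor) and the standalone MessageIndex type are illustrative assumptions:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// MessageIndex is a stand-in for arbutil.MessageIndex.
type MessageIndex uint64

// syncMonitor mirrors the relevant slice of arbnode.SyncMonitor: it only
// receives counts via UpdateMessageCount and never calls back into the streamer.
type syncMonitor struct {
	currentMessageCount     atomic.Uint64
	feedPendingMessageCount atomic.Uint64
}

func (s *syncMonitor) UpdateMessageCount(committed, feedPending MessageIndex) {
	s.currentMessageCount.Store(uint64(committed))
	s.feedPendingMessageCount.Store(uint64(feedPending))
}

// streamer stands in for TransactionStreamer; its fields and methods here are
// illustrative names, not the real API.
type streamer struct {
	monitor          *syncMonitor
	committedCount   MessageIndex
	feedPendingCount MessageIndex
}

func (t *streamer) setMessageCount(committed MessageIndex) {
	t.committedCount = committed
	t.notifySyncMonitor()
}

func (t *streamer) setFeedPendingCount(pending MessageIndex) {
	t.feedPendingCount = pending
	t.notifySyncMonitor()
}

// notifySyncMonitor pushes the streamer's current view of both counts to the monitor.
func (t *streamer) notifySyncMonitor() {
	if t.monitor != nil {
		t.monitor.UpdateMessageCount(t.committedCount, t.feedPendingCount)
	}
}

func main() {
	m := &syncMonitor{}
	s := &streamer{monitor: m}
	s.setMessageCount(42)
	s.setFeedPendingCount(45)
	fmt.Println(m.currentMessageCount.Load(), m.feedPendingMessageCount.Load()) // 42 45
}
```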

if err != nil {
return nil, err
}
@@ -1083,7 +1093,7 @@ func createNodeImpl(
return nil, err
}

- txStreamer, err := getTransactionStreamer(ctx, arbDb, l2Config, executionClient, broadcastServer, configFetcher, fatalErrChan)
+ txStreamer, err := getTransactionStreamer(ctx, arbDb, l2Config, executionClient, broadcastServer, syncMonitor, configFetcher, fatalErrChan)
if err != nil {
return nil, err
}
@@ -1460,9 +1470,9 @@ func (n *Node) Start(ctx context.Context) error {
if n.configFetcher != nil {
n.configFetcher.Start(ctx)
}
- // Also make sure to call initialize on the sync monitor after the inbox reader, tx streamer, and block validator are started.
- // Else sync might call inbox reader or tx streamer before they are started, and it will lead to panic.
- n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator)
+ // Also make sure to call initialize on the sync monitor after the inbox reader and block validator are started.
+ // Else sync might call inbox reader before it is started, and it will lead to panic.
+ n.SyncMonitor.Initialize(n.InboxReader, n.SeqCoordinator)
n.SyncMonitor.Start(ctx)
if n.ConsensusExecutionSyncer != nil {
n.ConsensusExecutionSyncer.Start(ctx)
40 changes: 19 additions & 21 deletions arbnode/sync_monitor.go
@@ -3,6 +3,7 @@ package arbnode
import (
"context"
"sync"
"sync/atomic"
"time"

"github.com/spf13/pflag"
@@ -17,10 +18,13 @@ type SyncMonitor struct {
stopwaiter.StopWaiter
config func() *SyncMonitorConfig
inboxReader *InboxReader
- txStreamer *TransactionStreamer
coordinator *SeqCoordinator
initialized bool

+ // Updates to these are pushed from the TransactionStreamer using UpdateMessageCount
+ currentMessageCount atomic.Uint64
+ feedPendingMessageCount atomic.Uint64

syncTargetLock sync.Mutex
nextSyncTarget arbutil.MessageIndex
syncTarget arbutil.MessageIndex
@@ -48,13 +52,18 @@ func SyncMonitorConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.Duration(prefix+".msg-lag", DefaultSyncMonitorConfig.MsgLag, "allowed msg lag while still considered in sync")
}

- func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator) {
+ func (s *SyncMonitor) Initialize(inboxReader *InboxReader, coordinator *SeqCoordinator) {
s.inboxReader = inboxReader
- s.txStreamer = txStreamer
s.coordinator = coordinator
s.initialized = true
}

+ // UpdateMessageCount updates the internal message count tracking
+ func (s *SyncMonitor) UpdateMessageCount(committed, feedPending arbutil.MessageIndex) {
+ s.currentMessageCount.Store(uint64(committed))
+ s.feedPendingMessageCount.Store(uint64(feedPending))
+ }

func (s *SyncMonitor) updateSyncTarget(ctx context.Context) time.Duration {
nextSyncTarget, err := s.maxMessageCount()
s.syncTargetLock.Lock()
@@ -89,14 +98,10 @@ func (s *SyncMonitor) GetMaxMessageCount() (arbutil.MessageIndex, error) {
}

func (s *SyncMonitor) maxMessageCount() (arbutil.MessageIndex, error) {
- msgCount, err := s.txStreamer.GetMessageCount()
- if err != nil {
- return 0, err
- }
-
- pending := s.txStreamer.FeedPendingMessageCount()
- if pending > msgCount {
- msgCount = pending
+ msgCount := arbutil.MessageIndex(s.currentMessageCount.Load())
+ feedPending := arbutil.MessageIndex(s.feedPendingMessageCount.Load())
+ if feedPending > msgCount {
+ msgCount = feedPending
}

if s.inboxReader != nil {
@@ -149,14 +154,10 @@ func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} {
}
res["maxMessageCount"] = maxMsgCount

- msgCount, err := s.txStreamer.GetMessageCount()
- if err != nil {
- res["msgCountError"] = err.Error()
- return res
- }
+ msgCount := arbutil.MessageIndex(s.currentMessageCount.Load())
res["msgCount"] = msgCount

- res["feedPendingMessageCount"] = s.txStreamer.FeedPendingMessageCount()
+ res["feedPendingMessageCount"] = arbutil.MessageIndex(s.feedPendingMessageCount.Load())

if s.inboxReader != nil {
batchSeen := s.inboxReader.GetLastSeenBatchCount()
@@ -224,10 +225,7 @@ func (s *SyncMonitor) Synced() bool {
return false
}

- msgCount, err := s.txStreamer.GetMessageCount()
- if err != nil {
- return false
- }
+ msgCount := arbutil.MessageIndex(s.currentMessageCount.Load())

if syncTarget > msgCount {
return false
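
To summarize the new sync_monitor.go behaviour shown above, here is a minimal, self-contained sketch of how the sync status can now be derived purely from the pushed counters. The standalone MessageIndex type and function names are illustrative; the real SyncMonitor additionally consults the InboxReader and the sequencer coordinator, which are omitted here:

```go
package main

import "fmt"

// MessageIndex is a stand-in for arbutil.MessageIndex.
type MessageIndex uint64

// maxMessageCount mirrors the shape of the new logic: take the larger of the
// committed message count and the feed-pending count pushed by the streamer.
// (The real method also folds in message counts derived from the InboxReader.)
func maxMessageCount(committed, feedPending MessageIndex) MessageIndex {
	msgCount := committed
	if feedPending > msgCount {
		msgCount = feedPending
	}
	return msgCount
}

// synced mirrors the shape of SyncMonitor.Synced: in sync only once the
// committed message count has caught up with the sync target.
func synced(syncTarget, committed MessageIndex) bool {
	return syncTarget <= committed
}

func main() {
	committed := MessageIndex(100)
	feedPending := MessageIndex(120)
	target := maxMessageCount(committed, feedPending)

	fmt.Println(target)                    // 120
	fmt.Println(synced(target, committed)) // false: still 20 messages behind the feed
}
```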