Skip to content
Open
Show file tree
Hide file tree
Changes from 9 commits
Commits
Show all changes
56 commits
Select commit Hold shift + click to select a range
4f132e9
Implement Execution/Consensus interface over RPC
KolbyML Sep 10, 2025
969a1b6
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 15, 2025
8219a5e
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 17, 2025
2a18ad2
update implementation, separate out responsibilities of starting a js…
ganeshvanahalli Sep 17, 2025
eaad44a
fix CI flag
ganeshvanahalli Sep 17, 2025
901dbea
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 18, 2025
18097ee
bug fix
ganeshvanahalli Sep 18, 2025
f54a0f0
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 18, 2025
eeb55d3
fix failing tests and refactor code to not use executionClient before…
ganeshvanahalli Sep 18, 2025
f35e01b
merge master and resolve conflicts
ganeshvanahalli Sep 25, 2025
acb0165
address PR comments
ganeshvanahalli Sep 26, 2025
baca94a
address PR comment
ganeshvanahalli Sep 26, 2025
61b8b17
fix challenge tests
ganeshvanahalli Sep 26, 2025
55e3d05
merge upstream master
ganeshvanahalli Oct 8, 2025
7c9c449
minor fix
ganeshvanahalli Oct 8, 2025
e9b35e3
bugfix
ganeshvanahalli Oct 8, 2025
6894bbe
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 9, 2025
b71389d
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 15, 2025
f793c62
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 21, 2025
c65f6bf
merge master and resolve conflicts
ganeshvanahalli Oct 22, 2025
25c70bd
fix failing CI
ganeshvanahalli Oct 22, 2025
6dac9b1
move MessageResult from consensus to execution package
ganeshvanahalli Oct 23, 2025
130a5ab
merge master and resolve conflicts
ganeshvanahalli Oct 23, 2025
c2f6ebd
fix CI
ganeshvanahalli Oct 23, 2025
94aa08c
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 24, 2025
ac38683
address PR comments
ganeshvanahalli Nov 4, 2025
75f885c
address PR comment
ganeshvanahalli Nov 4, 2025
1f8c7c2
merge master and resolve conflicts
ganeshvanahalli Nov 4, 2025
ffe9416
fix lint error
ganeshvanahalli Nov 4, 2025
04b166e
address review
ganeshvanahalli Nov 6, 2025
5201dda
remove import aliases
ganeshvanahalli Nov 6, 2025
61a4017
fix CI
ganeshvanahalli Nov 7, 2025
2df87bc
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 7, 2025
cb34614
resolve PR comments
ganeshvanahalli Nov 7, 2025
3b5fa3b
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 7, 2025
0acc651
fix typo
ganeshvanahalli Nov 7, 2025
f8b8f60
address PR comments
ganeshvanahalli Nov 13, 2025
b71cb29
merge master and resolve conflict
ganeshvanahalli Nov 13, 2025
72458ac
address PR comments
ganeshvanahalli Nov 14, 2025
4501faf
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 14, 2025
cd5bd77
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 4, 2025
3d7a2c5
address PR comments
ganeshvanahalli Dec 4, 2025
6ce9b5b
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 5, 2025
cce9ac7
address PR comments
ganeshvanahalli Dec 6, 2025
ceef817
fix CI
ganeshvanahalli Dec 6, 2025
ac94517
attempt: fix challenge tests CI
ganeshvanahalli Dec 9, 2025
144c44e
address PR comments
ganeshvanahalli Dec 9, 2025
c556785
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 9, 2025
310a61a
ensure correct cleanup in retryableSetup system tests
ganeshvanahalli Dec 9, 2025
667db30
only enable ConsensusExecutionInSameProcessUseRPC over websocket
ganeshvanahalli Dec 10, 2025
3f1cdd1
address PR comments
ganeshvanahalli Dec 10, 2025
b584553
address PR comments
ganeshvanahalli Dec 10, 2025
690b8e7
address PR comments
ganeshvanahalli Dec 12, 2025
219dd11
undo change of default for ConsensusExecutionInSameProcessUseRPC
ganeshvanahalli Dec 12, 2025
04cdeb3
update CI workflow
ganeshvanahalli Dec 12, 2025
20e5888
merge master and resolve conflicts
ganeshvanahalli Dec 15, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,12 @@ jobs:
run: |
echo "Running tests with Hash Scheme" >> full.log
${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 60m --test_state_scheme hash

- name: run tests with consensus execution json rpc interconnect enabled
if: matrix.test-mode == 'defaults'
run: |
echo "Running tests with consensus execution json rpc interconnect enabled in Hash Scheme" >> full.log
${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 60m --test_state_scheme hash --execution_consensus_jsonrpc_interconnect

- name: run redis tests
if: matrix.test-mode == 'defaults'
Expand Down
9 changes: 9 additions & 0 deletions .github/workflows/gotestsum.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ test_state_scheme=""
log=true
race=false
cover=false
execution_consensus_jsonrpc_interconnect=false
while [[ $# -gt 0 ]]; do
case $1 in
--timeout)
Expand Down Expand Up @@ -47,6 +48,10 @@ while [[ $# -gt 0 ]]; do
--cover)
cover=true
shift
;;
--execution_consensus_jsonrpc_interconnect)
execution_consensus_jsonrpc_interconnect=true
shift
;;
--nolog)
log=false
Expand Down Expand Up @@ -88,6 +93,10 @@ for package in $packages; do
cmd="$cmd -args -- --test_loglevel=8" # Use error log level, which is the value 8 in the slog level enum for tests.
fi

if [ "$execution_consensus_jsonrpc_interconnect" == true ]; then
cmd="$cmd --execution_consensus_jsonrpc_interconnect=true"
fi

if [ "$log" == true ]; then
cmd="$cmd > >(stdbuf -oL tee -a full.log | grep -vE \"DEBUG|TRACE|INFO|seal\")"
else
Expand Down
42 changes: 26 additions & 16 deletions arbnode/blockmetadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ type BlockMetadataFetcher struct {
db ethdb.Database
client *rpcclient.RpcClient
exec execution.ExecutionClient
startPos uint64
trackBlockMetadataFrom arbutil.MessageIndex
expectedChainId uint64

Expand All @@ -87,14 +88,7 @@ func NewBlockMetadataFetcher(
startPos uint64,
expectedChainId uint64,
) (*BlockMetadataFetcher, error) {
var trackBlockMetadataFrom arbutil.MessageIndex
var err error
if startPos != 0 {
trackBlockMetadataFrom, err = exec.BlockNumberToMessageIndex(startPos).Await(ctx)
if err != nil {
return nil, err
}
}
client := rpcclient.NewRpcClient(func() *rpcclient.ClientConfig { return &c.Source }, nil)
if err = client.Start(ctx); err != nil {
return nil, err
Expand All @@ -110,14 +104,14 @@ func NewBlockMetadataFetcher(
}

fetcher := &BlockMetadataFetcher{
config: c,
db: db,
client: client,
exec: exec,
trackBlockMetadataFrom: trackBlockMetadataFrom,
expectedChainId: expectedChainId,
chainIdChecked: chainIdChecked,
currentSyncInterval: c.SyncInterval,
config: c,
db: db,
client: client,
exec: exec,
startPos: startPos,
expectedChainId: expectedChainId,
chainIdChecked: chainIdChecked,
currentSyncInterval: c.SyncInterval,
}
return fetcher, nil
}
Expand Down Expand Up @@ -247,9 +241,25 @@ func (b *BlockMetadataFetcher) Update(ctx context.Context) time.Duration {
return b.config.SyncInterval
}

func (b *BlockMetadataFetcher) Start(ctx context.Context) {
// InitializeTrackBlockMetadataFrom is only used for testing purposes
func (b *BlockMetadataFetcher) InitializeTrackBlockMetadataFrom(ctx context.Context) error {
var err error
if b.startPos != 0 {
b.trackBlockMetadataFrom, err = b.exec.BlockNumberToMessageIndex(b.startPos).Await(ctx)
if err != nil {
return err
}
}
return nil
}

func (b *BlockMetadataFetcher) Start(ctx context.Context) error {
b.StopWaiter.Start(ctx, b)
if err := b.InitializeTrackBlockMetadataFrom(ctx); err != nil {
return err
}
b.CallIteratively(b.Update)
return nil
}

func (b *BlockMetadataFetcher) StopAndWait() {
Expand Down
7 changes: 4 additions & 3 deletions arbnode/inbox_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"github.com/offchainlabs/nitro/arbos/l2pricing"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/consensus"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/statetransfer"
Expand Down Expand Up @@ -88,19 +89,19 @@ func (w *execClientWrapper) SetFinalityData(
return containers.NewReadyPromise(struct{}{}, nil)
}

func (w *execClientWrapper) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) containers.PromiseInterface[*execution.MessageResult] {
func (w *execClientWrapper) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) containers.PromiseInterface[*consensus.MessageResult] {
return containers.NewReadyPromise(w.ExecutionEngine.DigestMessage(num, msg, msgForPrefetch))
}

func (w *execClientWrapper) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockInfo, oldMessages []*arbostypes.MessageWithMetadata) containers.PromiseInterface[[]*execution.MessageResult] {
func (w *execClientWrapper) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockInfo, oldMessages []*arbostypes.MessageWithMetadata) containers.PromiseInterface[[]*consensus.MessageResult] {
return containers.NewReadyPromise(w.ExecutionEngine.Reorg(count, newMessages, oldMessages))
}

func (w *execClientWrapper) HeadMessageIndex() containers.PromiseInterface[arbutil.MessageIndex] {
return containers.NewReadyPromise(w.ExecutionEngine.HeadMessageIndex())
}

func (w *execClientWrapper) ResultAtMessageIndex(pos arbutil.MessageIndex) containers.PromiseInterface[*execution.MessageResult] {
func (w *execClientWrapper) ResultAtMessageIndex(pos arbutil.MessageIndex) containers.PromiseInterface[*consensus.MessageResult] {
return containers.NewReadyPromise(w.ExecutionEngine.ResultAtMessageIndex(pos))
}

Expand Down
101 changes: 71 additions & 30 deletions arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,18 +37,20 @@ import (
"github.com/offchainlabs/nitro/broadcaster"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/genericconf"
"github.com/offchainlabs/nitro/consensus"
consensusrpcserver "github.com/offchainlabs/nitro/consensus/rpcserver"
"github.com/offchainlabs/nitro/daprovider"
"github.com/offchainlabs/nitro/daprovider/daclient"
"github.com/offchainlabs/nitro/daprovider/das"
"github.com/offchainlabs/nitro/daprovider/das/dasserver"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
executionrpcclient "github.com/offchainlabs/nitro/execution/rpcclient"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
"github.com/offchainlabs/nitro/staker"
"github.com/offchainlabs/nitro/staker/bold"
"github.com/offchainlabs/nitro/staker/legacy"
"github.com/offchainlabs/nitro/staker/multi_protocol"
legacystaker "github.com/offchainlabs/nitro/staker/legacy"
multiprotocolstaker "github.com/offchainlabs/nitro/staker/multi_protocol"
"github.com/offchainlabs/nitro/staker/validatorwallet"
"github.com/offchainlabs/nitro/util/containers"
"github.com/offchainlabs/nitro/util/contracts"
Expand All @@ -59,6 +61,24 @@ import (
"github.com/offchainlabs/nitro/wsbroadcastserver"
)

// RPCServerConfig configures the consensus node's RPC server endpoint
// (registered in registerAPIs under the consensus namespace when enabled).
type RPCServerConfig struct {
// Enable turns on serving the consensus node over RPC.
Enable bool `koanf:"enable"`
// Public marks the RPC API as public (per the flag help: "rpc is public").
Public bool `koanf:"public"`
// Authenticated marks the RPC API as authenticated (per the flag help:
// "rpc is authenticated") — presumably JWT-gated; confirm against the stack setup.
Authenticated bool `koanf:"authenticated"`
}

// DefaultRPCServerConfig is the default RPC server configuration:
// serving disabled, endpoint not public, authentication required.
var DefaultRPCServerConfig = RPCServerConfig{
Enable: false,
Public: false,
Authenticated: true,
}

// RPCServerAddOptions registers the rpc-server command-line flags
// (enable/public/authenticated) under the given prefix, defaulting to
// the values in DefaultRPCServerConfig.
func RPCServerAddOptions(prefix string, f *pflag.FlagSet) {
f.Bool(prefix+".enable", DefaultRPCServerConfig.Enable, "enable consensus node to serve over rpc")
f.Bool(prefix+".public", DefaultRPCServerConfig.Public, "rpc is public")
f.Bool(prefix+".authenticated", DefaultRPCServerConfig.Authenticated, "rpc is authenticated")
}

type Config struct {
Sequencer bool `koanf:"sequencer"`
ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"`
Expand All @@ -80,6 +100,8 @@ type Config struct {
ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"`
BlockMetadataFetcher BlockMetadataFetcherConfig `koanf:"block-metadata-fetcher" reload:"hot"`
ConsensusExecutionSyncer ConsensusExecutionSyncerConfig `koanf:"consensus-execution-syncer"`
RPCServer RPCServerConfig `koanf:"rpc-server"`
ExecutionRPCClient rpcclient.ClientConfig `koanf:"execution-rpc-client" reload:"hot"`
// SnapSyncConfig is only used for testing purposes, these should not be configured in production.
SnapSyncTest SnapSyncConfig
}
Expand Down Expand Up @@ -119,6 +141,9 @@ func (c *Config) Validate() error {
if c.TransactionStreamer.TrackBlockMetadataFrom != 0 && !c.BlockMetadataFetcher.Enable {
log.Warn("track-block-metadata-from is set but blockMetadata fetcher is not enabled")
}
if err := c.ExecutionRPCClient.Validate(); err != nil {
return fmt.Errorf("error validating ExecutionRPCClient config: %w", err)
}
return nil
}

Expand Down Expand Up @@ -153,6 +178,8 @@ func ConfigAddOptions(prefix string, f *pflag.FlagSet, feedInputEnable bool, fee
resourcemanager.ConfigAddOptions(prefix+".resource-mgmt", f)
BlockMetadataFetcherConfigAddOptions(prefix+".block-metadata-fetcher", f)
ConsensusExecutionSyncerConfigAddOptions(prefix+".consensus-execution-syncer", f)
RPCServerAddOptions(prefix+".rpc-server", f)
rpcclient.RPCClientAddOptions(prefix+".execution-rpc-client", f, &ConfigDefault.ExecutionRPCClient)
}

var ConfigDefault = Config{
Expand All @@ -177,6 +204,15 @@ var ConfigDefault = Config{
Maintenance: DefaultMaintenanceConfig,
ConsensusExecutionSyncer: DefaultConsensusExecutionSyncerConfig,
SnapSyncTest: DefaultSnapSyncConfig,
RPCServer: DefaultRPCServerConfig,
ExecutionRPCClient: rpcclient.ClientConfig{
URL: "",
JWTSecret: "",
Retries: 3,
RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused",
ArgLogLimit: 2048,
WebsocketMessageSizeLimit: 256 * 1024 * 1024,
},
}

func ConfigDefaultL1Test() *Config {
Expand Down Expand Up @@ -1228,6 +1264,16 @@ func registerAPIs(currentNode *Node, stack *node.Node) {
Public: false,
})
}
config := currentNode.configFetcher.Get()
if config.RPCServer.Enable {
apis = append(apis, rpc.API{
Namespace: consensus.RPCNamespace,
Version: "1.0",
Service: consensusrpcserver.NewConsensusRpcServer(currentNode),
Public: config.RPCServer.Public,
Authenticated: config.RPCServer.Authenticated,
})
}
stack.RegisterAPIs(apis)
}

Expand All @@ -1251,6 +1297,10 @@ func CreateNodeExecutionClient(
if executionClient == nil {
return nil, errors.New("execution client must be non-nil")
}
if configFetcher.Get().ExecutionRPCClient.URL != "" {
execConfigFetcher := func() *rpcclient.ClientConfig { return &configFetcher.Get().ExecutionRPCClient }
executionClient = executionrpcclient.NewExecutionRpcClient(execConfigFetcher, nil)
}
currentNode, err := createNodeImpl(ctx, stack, executionClient, nil, nil, nil, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot)
if err != nil {
return nil, err
Expand Down Expand Up @@ -1282,6 +1332,10 @@ func CreateNodeFullExecutionClient(
if (executionClient == nil) || (executionSequencer == nil) || (executionRecorder == nil) || (executionBatchPoster == nil) {
return nil, errors.New("execution client, sequencer, recorder, and batch poster must be non-nil")
}
if configFetcher.Get().ExecutionRPCClient.URL != "" {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How about being more explicit here, and relying on ExecutionNode and ConsensusExecutionUseRPC instead of ExecutionRPCClient.URL?
Actually, how about renaming ConsensusExecutionUseRPC to something like ConsensusExecutionUseRPCWhenBothAreRunInTheSameProcess, and create a helper function, or variable, like func ConsensusExecutionUseRPC(ConsensusExecutionUseRPCWhenBothAreRunningInTheSameProcess, ExecutionNode), that is used in situations like this one?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor

@diegoximenes diegoximenes Nov 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In cmd/nitro.go, nodeConfig.ExecutionNode is being used to decide if the gethexec.ExecutionNode will be created or not.
Here the decision if gethexec.ExecutionNode, which is provided as fullExecutionClient, is going to be used or not, take into consideration another config, ExecutionRPCClient.URL.
However, if nodeConfig.ExecutionNode is set to true, and nodeConfig.ConsensusExecutionUseRPC is set to false, then it doesn't matter if the RPC URLs are set or not.
I mean, if you first validate that, making sure that in this configuration scenario the RPC URLs are not set, as you are already doing, then the code is correct 🙂, but it is hard to read.
As an example, when reading the CreateConsensusNodeConnectedWithFullExecutionClient func, if I want to check if it makes sense to rely on this RPC URL config here, I need to check where/if those used configs are being validated, and the call to those validation procedures are not close to where the call to CreateConsensusNodeConnectedWithFullExecutionClient is being made.
It is more brittle too, if we change the validation of those configs we will need to remember to update all places where this RPC URLs checks are being used.

That is why I proposed to have a unified way to verify if the RPC client should be used, that takes into consideration nodeConfig.ConsensusExecutionUseRPC and nodeConfig.ExecutionNode, possibly through a function, or creating a variable and passing it around, instead of relying on RPC URLs.

Also, the ConsensusExecutionUseRPC name could indicate more clearly that is only used when NodeConfig.ExecutionNode is set to true, that is why I suggested to rename to something like ConsensusExecutionUseRPCWhenBothAreRunInTheSameProcess.
I mean, semantically if NodeConfig.ExecutionNode is set to false, then it makes sense to enforce that ConsensusExecutionUseRPC is true.

WDYT?

execConfigFetcher := func() *rpcclient.ClientConfig { return &configFetcher.Get().ExecutionRPCClient }
executionClient = executionrpcclient.NewExecutionRpcClient(execConfigFetcher, nil)
}
currentNode, err := createNodeImpl(ctx, stack, executionClient, executionSequencer, executionRecorder, executionBatchPoster, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot)
if err != nil {
return nil, err
Expand All @@ -1291,27 +1345,12 @@ func CreateNodeFullExecutionClient(
}

func (n *Node) Start(ctx context.Context) error {
execClient, ok := n.ExecutionClient.(*gethexec.ExecutionNode)
if !ok {
execClient = nil
}
if execClient != nil {
err := execClient.Initialize(ctx)
if err != nil {
return fmt.Errorf("error initializing exec client: %w", err)
var err error
if execRPCClient, ok := n.ExecutionClient.(*executionrpcclient.ExecutionRpcClient); ok {
if err = execRPCClient.Start(ctx); err != nil {
return fmt.Errorf("error starting exec rpc client: %w", err)
}
}
err := n.Stack.Start()
if err != nil {
return fmt.Errorf("error starting geth stack: %w", err)
}
if execClient != nil {
execClient.SetConsensusClient(n)
}
err = n.ExecutionClient.Start(ctx)
if err != nil {
return fmt.Errorf("error starting exec client: %w", err)
}
if n.BlobReader != nil {
err = n.BlobReader.Initialize(ctx)
if err != nil {
Expand Down Expand Up @@ -1418,7 +1457,10 @@ func (n *Node) Start(ctx context.Context) error {
}()
}
if n.blockMetadataFetcher != nil {
n.blockMetadataFetcher.Start(ctx)
err = n.blockMetadataFetcher.Start(ctx)
if err != nil {
return fmt.Errorf("error starting block metadata fetcher: %w", err)
}
}
if n.configFetcher != nil {
n.configFetcher.Start(ctx)
Expand Down Expand Up @@ -1493,16 +1535,15 @@ func (n *Node) StopAndWait() {
n.dasServerCloseFn()
}
if n.ExecutionClient != nil {
n.ExecutionClient.StopAndWait()
}
if err := n.Stack.Close(); err != nil {
log.Error("error on stack close", "err", err)
if _, ok := n.ExecutionClient.(*executionrpcclient.ExecutionRpcClient); ok {
n.ExecutionClient.StopAndWait()
}
}
}

func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[execution.InboxBatch] {
func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[consensus.InboxBatch] {
batchNum, found, err := n.InboxTracker.FindInboxBatchContainingMessage(message)
inboxBatch := execution.InboxBatch{
inboxBatch := consensus.InboxBatch{
BatchNum: batchNum,
Found: found,
}
Expand All @@ -1525,7 +1566,7 @@ func (n *Node) SyncTargetMessageCount() containers.PromiseInterface[arbutil.Mess
return containers.NewReadyPromise(n.SyncMonitor.SyncTargetMessageCount(), nil)
}

func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult execution.MessageResult, blockMetadata common.BlockMetadata) containers.PromiseInterface[struct{}] {
func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult consensus.MessageResult, blockMetadata common.BlockMetadata) containers.PromiseInterface[struct{}] {
err := n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, msgResult, blockMetadata)
return containers.NewReadyPromise(struct{}{}, err)
}
Expand Down
Loading
Loading