Commits
4f132e9
Implement Execution/Consensus interface over RPC
KolbyML Sep 10, 2025
969a1b6
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 15, 2025
8219a5e
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 17, 2025
2a18ad2
update implementation, separate out responsibilities of starting a js…
ganeshvanahalli Sep 17, 2025
eaad44a
fix CI flag
ganeshvanahalli Sep 17, 2025
901dbea
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 18, 2025
18097ee
bug fix
ganeshvanahalli Sep 18, 2025
f54a0f0
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Sep 18, 2025
eeb55d3
fix failing tests and refactor code to not use executionClient before…
ganeshvanahalli Sep 18, 2025
f35e01b
merge master and resolve conflicts
ganeshvanahalli Sep 25, 2025
acb0165
address PR comments
ganeshvanahalli Sep 26, 2025
baca94a
address PR comment
ganeshvanahalli Sep 26, 2025
61b8b17
fix challenge tests
ganeshvanahalli Sep 26, 2025
55e3d05
merge upstream master
ganeshvanahalli Oct 8, 2025
7c9c449
minor fix
ganeshvanahalli Oct 8, 2025
e9b35e3
bugfix
ganeshvanahalli Oct 8, 2025
6894bbe
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 9, 2025
b71389d
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 15, 2025
f793c62
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 21, 2025
c65f6bf
merge master and resolve conflicts
ganeshvanahalli Oct 22, 2025
25c70bd
fix failing CI
ganeshvanahalli Oct 22, 2025
6dac9b1
move MessageResult from consensus to execution package
ganeshvanahalli Oct 23, 2025
130a5ab
merge master and resolve conflicts
ganeshvanahalli Oct 23, 2025
c2f6ebd
fix CI
ganeshvanahalli Oct 23, 2025
94aa08c
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Oct 24, 2025
ac38683
address PR comments
ganeshvanahalli Nov 4, 2025
75f885c
address PR comment
ganeshvanahalli Nov 4, 2025
1f8c7c2
merge master and resolve conflicts
ganeshvanahalli Nov 4, 2025
ffe9416
fix lint error
ganeshvanahalli Nov 4, 2025
04b166e
address review
ganeshvanahalli Nov 6, 2025
5201dda
remove import aliases
ganeshvanahalli Nov 6, 2025
61a4017
fix CI
ganeshvanahalli Nov 7, 2025
2df87bc
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 7, 2025
cb34614
resolve PR comments
ganeshvanahalli Nov 7, 2025
3b5fa3b
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 7, 2025
0acc651
fix typo
ganeshvanahalli Nov 7, 2025
f8b8f60
address PR comments
ganeshvanahalli Nov 13, 2025
b71cb29
merge master and resolve conflict
ganeshvanahalli Nov 13, 2025
72458ac
address PR comments
ganeshvanahalli Nov 14, 2025
4501faf
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Nov 14, 2025
cd5bd77
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 4, 2025
3d7a2c5
address PR comments
ganeshvanahalli Dec 4, 2025
6ce9b5b
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 5, 2025
cce9ac7
address PR comments
ganeshvanahalli Dec 6, 2025
ceef817
fix CI
ganeshvanahalli Dec 6, 2025
ac94517
attempt: fix challenge tests CI
ganeshvanahalli Dec 9, 2025
144c44e
address PR comments
ganeshvanahalli Dec 9, 2025
c556785
Merge branch 'master' into Execution/Consensus-interface-over-RPC
ganeshvanahalli Dec 9, 2025
310a61a
ensure correct cleanup in retryableSetup system tests
ganeshvanahalli Dec 9, 2025
667db30
only enable ConsensusExecutionInSameProcessUseRPC over websocket
ganeshvanahalli Dec 10, 2025
3f1cdd1
address PR comments
ganeshvanahalli Dec 10, 2025
b584553
address PR comments
ganeshvanahalli Dec 10, 2025
690b8e7
address PR comments
ganeshvanahalli Dec 12, 2025
219dd11
undo change of default for ConsensusExecutionInSameProcessUseRPC
ganeshvanahalli Dec 12, 2025
04cdeb3
update CI workflow
ganeshvanahalli Dec 12, 2025
20e5888
merge master and resolve conflicts
ganeshvanahalli Dec 15, 2025
6 changes: 6 additions & 0 deletions .github/workflows/_go-tests.yml
@@ -122,6 +122,12 @@ jobs:
echo "One or more tests failed."
exit 1

- name: run tests with consensus and execution nodes connected over json rpc
if: matrix.test-mode == 'defaults'
@diegoximenes (Contributor), Dec 11, 2025:
Is this step being run by CI?

Some possible strategies:

  • Two new steps, one for inputs.run-defaults-a and another for inputs.run-defaults-b. This will likely "double" the CI times related to run-defaults-*.
  • Create two other inputs, something like run-consensus-execution-over-rpc-a and run-consensus-execution-over-rpc-b, that will use the hash scheme. This one seems safer right now.
  • Create specific system tests related to consensus and execution communicating through RPC.

(Contributor):
It used to run previously, but this PR is fairly old and the CI workflow has changed since; I updated the step to run along with defaults-A and defaults-B.

run: >-
${{ github.workspace }}/.github/workflows/gotestsum.sh
--tags cionly --timeout 60m --test_state_scheme hash --consensus_execution_in_same_process_use_rpc

- name: run redis tests
if: inputs.run-defaults-a
run: >-
9 changes: 9 additions & 0 deletions .github/workflows/gotestsum.sh
@@ -17,6 +17,7 @@ junitfile=""
log=true
race=false
cover=false
consensus_execution_in_same_process_use_rpc=false
flaky=false
while [[ $# -gt 0 ]]; do
case $1 in
@@ -64,6 +65,10 @@ while [[ $# -gt 0 ]]; do
cover=true
shift
;;
--consensus_execution_in_same_process_use_rpc)
consensus_execution_in_same_process_use_rpc=true
shift
;;
--nolog)
log=false
shift
@@ -137,6 +142,10 @@ else
cmd="$cmd -args -- --test_loglevel=8" # Use error log level, which is the value 8 in the slog level enum for tests.
fi

if [ "$consensus_execution_in_same_process_use_rpc" == true ]; then
cmd="$cmd --consensus_execution_in_same_process_use_rpc=true"
fi

if [ "$test_database_engine" != "" ]; then
cmd="$cmd --test_database_engine=$test_database_engine"
fi
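For readers unfamiliar with how this flag reaches the tests: gotestsum.sh appends --consensus_execution_in_same_process_use_rpc=true to the go test invocation, and presumably a test-side flag picks it up. A minimal sketch of what that consumer might look like, with a hypothetical package and helper name (the actual wiring lives in Nitro's system-test helpers and may differ):

package testhelpers

import "flag"

// Mirrors the shell flag that gotestsum.sh appends to the go test command.
// The flag name matches the script above; the variable and helper names are hypothetical.
var consensusExecutionInSameProcessUseRPC = flag.Bool(
	"consensus_execution_in_same_process_use_rpc",
	false,
	"connect consensus and execution over JSON-RPC even when both run in the same process",
)

// UseConsensusExecutionRPC reports whether system tests should wire consensus
// and execution together over RPC instead of direct in-process calls.
func UseConsensusExecutionRPC() bool {
	return *consensusExecutionInSameProcessUseRPC
}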
60 changes: 32 additions & 28 deletions arbnode/blockmetadata.go
@@ -69,8 +69,10 @@ type BlockMetadataFetcher struct
stopwaiter.StopWaiter
config BlockMetadataFetcherConfig
db ethdb.Database
genesisBlockNum uint64
client *rpcclient.RpcClient
exec execution.ExecutionClient
startBlockNum uint64
trackBlockMetadataFrom arbutil.MessageIndex
expectedChainId uint64

@@ -83,18 +85,12 @@ func NewBlockMetadataFetcher(
ctx context.Context,
c BlockMetadataFetcherConfig,
db ethdb.Database,
genesisBlockNum uint64,
exec execution.ExecutionClient,
startPos uint64,
startBlockNum uint64,
expectedChainId uint64,
) (*BlockMetadataFetcher, error) {
var trackBlockMetadataFrom arbutil.MessageIndex
var err error
if startPos != 0 {
trackBlockMetadataFrom, err = exec.BlockNumberToMessageIndex(startPos).Await(ctx)
if err != nil {
return nil, err
}
}
client := rpcclient.NewRpcClient(func() *rpcclient.ClientConfig { return &c.Source }, nil)
if err = client.Start(ctx); err != nil {
return nil, err
@@ -110,14 +106,15 @@ func NewBlockMetadataFetcher(
}

fetcher := &BlockMetadataFetcher{
config: c,
db: db,
client: client,
exec: exec,
trackBlockMetadataFrom: trackBlockMetadataFrom,
expectedChainId: expectedChainId,
chainIdChecked: chainIdChecked,
currentSyncInterval: c.SyncInterval,
config: c,
db: db,
genesisBlockNum: genesisBlockNum,
client: client,
exec: exec,
startBlockNum: startBlockNum,
expectedChainId: expectedChainId,
chainIdChecked: chainIdChecked,
currentSyncInterval: c.SyncInterval,
}
return fetcher, nil
}
@@ -151,7 +148,7 @@ func (b *BlockMetadataFetcher) persistBlockMetadata(ctx context.Context, query [
batch := b.db.NewBatch()
queryMap := util.ArrayToSet(query)
for _, elem := range result {
pos, err := b.exec.BlockNumberToMessageIndex(elem.BlockNumber).Await(ctx)
pos, err := util.BlockNumberToMessageIndex(elem.BlockNumber, b.genesisBlockNum)
if err != nil {
return err
}
@@ -184,16 +181,8 @@ func (b *BlockMetadataFetcher) Update(ctx context.Context) time.Duration {
}

handleQuery := func(query []uint64) bool {
fromBlock, err := b.exec.MessageIndexToBlockNumber(arbutil.MessageIndex(query[0])).Await(ctx)
if err != nil {
log.Error("Error getting fromBlock", "err", err)
return false
}
toBlock, err := b.exec.MessageIndexToBlockNumber(arbutil.MessageIndex(query[len(query)-1])).Await(ctx)
if err != nil {
log.Error("Error getting toBlock", "err", err)
return false
}
fromBlock := util.MessageIndexToBlockNumber(arbutil.MessageIndex(query[0]), b.genesisBlockNum)
toBlock := util.MessageIndexToBlockNumber(arbutil.MessageIndex(query[len(query)-1]), b.genesisBlockNum)

result, err := b.fetch(
ctx,
@@ -247,9 +236,24 @@ func (b *BlockMetadataFetcher) Update(ctx context.Context) time.Duration {
return b.config.SyncInterval
}

func (b *BlockMetadataFetcher) Start(ctx context.Context) {
func (b *BlockMetadataFetcher) InitializeTrackBlockMetadataFrom() error {
var err error
if b.startBlockNum != 0 {
b.trackBlockMetadataFrom, err = util.BlockNumberToMessageIndex(b.startBlockNum, b.genesisBlockNum)
if err != nil {
return err
}
}
return nil
}

func (b *BlockMetadataFetcher) Start(ctx context.Context) error {
b.StopWaiter.Start(ctx, b)
if err := b.InitializeTrackBlockMetadataFrom(); err != nil {
return err
}
b.CallIteratively(b.Update)
return nil
}

func (b *BlockMetadataFetcher) StopAndWait() {
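The refactor above replaces promise-based round-trips to the execution client with local arithmetic on genesisBlockNum. A rough sketch of the semantics assumed for util.BlockNumberToMessageIndex and util.MessageIndexToBlockNumber (the real helpers live in Nitro's util package; exact signatures and offset conventions may differ):

package util

import (
	"fmt"

	"github.com/offchainlabs/nitro/arbutil"
)

// BlockNumberToMessageIndex maps an L2 block number to a message index by
// offsetting against the genesis block number, rejecting pre-genesis blocks.
func BlockNumberToMessageIndex(blockNum, genesisBlockNum uint64) (arbutil.MessageIndex, error) {
	if blockNum < genesisBlockNum {
		return 0, fmt.Errorf("block number %d is before genesis block %d", blockNum, genesisBlockNum)
	}
	return arbutil.MessageIndex(blockNum - genesisBlockNum), nil
}

// MessageIndexToBlockNumber is the inverse mapping.
func MessageIndexToBlockNumber(msgIdx arbutil.MessageIndex, genesisBlockNum uint64) uint64 {
	return uint64(msgIdx) + genesisBlockNum
}

Since the mapping is pure arithmetic, persistBlockMetadata and Update no longer need to Await calls into the execution client, which also removes two error paths from handleQuery.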
8 changes: 0 additions & 8 deletions arbnode/inbox_test.go
@@ -111,14 +111,6 @@ func (w *execClientWrapper) Start(ctx context.Context) error {
return nil
}

func (w *execClientWrapper) MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) containers.PromiseInterface[uint64] {
return containers.NewReadyPromise(w.ExecutionEngine.MessageIndexToBlockNumber(messageNum), nil)
}

func (w *execClientWrapper) BlockNumberToMessageIndex(blockNum uint64) containers.PromiseInterface[arbutil.MessageIndex] {
return containers.NewReadyPromise(w.ExecutionEngine.BlockNumberToMessageIndex(blockNum))
}

func (w *execClientWrapper) ArbOSVersionForMessageIndex(msgIdx arbutil.MessageIndex) containers.PromiseInterface[uint64] {
return w.ExecutionEngine.ArbOSVersionForMessageIndex(msgIdx)
}
101 changes: 63 additions & 38 deletions arbnode/node.go
@@ -33,13 +33,15 @@ import (
"github.com/offchainlabs/nitro/broadcastclients"
"github.com/offchainlabs/nitro/broadcaster"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/consensus"
"github.com/offchainlabs/nitro/consensus/rpcserver"
"github.com/offchainlabs/nitro/daprovider"
daconfig "github.com/offchainlabs/nitro/daprovider/config"
"github.com/offchainlabs/nitro/daprovider/daclient"
"github.com/offchainlabs/nitro/daprovider/das"
"github.com/offchainlabs/nitro/daprovider/data_streaming"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/execution/rpcclient"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
"github.com/offchainlabs/nitro/staker"
@@ -52,6 +54,7 @@
"github.com/offchainlabs/nitro/util/headerreader"
"github.com/offchainlabs/nitro/util/redisutil"
"github.com/offchainlabs/nitro/util/rpcclient"
"github.com/offchainlabs/nitro/util/rpcserver"
"github.com/offchainlabs/nitro/util/signature"
"github.com/offchainlabs/nitro/wsbroadcastserver"
)
@@ -77,6 +80,8 @@ type Config struct {
ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"`
BlockMetadataFetcher BlockMetadataFetcherConfig `koanf:"block-metadata-fetcher" reload:"hot"`
ConsensusExecutionSyncer ConsensusExecutionSyncerConfig `koanf:"consensus-execution-syncer"`
RPCServer rpcserver.Config `koanf:"rpc-server"`
ExecutionRPCClient rpcclient.ClientConfig `koanf:"execution-rpc-client" reload:"hot"`
// SnapSyncConfig is only used for testing purposes, these should not be configured in production.
SnapSyncTest SnapSyncConfig
}
@@ -119,6 +124,9 @@ func (c *Config) Validate() error {
if c.TransactionStreamer.TrackBlockMetadataFrom != 0 && !c.BlockMetadataFetcher.Enable {
log.Warn("track-block-metadata-from is set but blockMetadata fetcher is not enabled")
}
if err := c.ExecutionRPCClient.Validate(); err != nil {
return fmt.Errorf("error validating ExecutionRPCClient config: %w", err)
}
// Check that sync-interval is not more than msg-lag / 2
if c.ConsensusExecutionSyncer.SyncInterval > c.SyncMonitor.MsgLag/2 {
log.Warn("consensus-execution-syncer.sync-interval is more than half of sync-monitor.msg-lag, which may cause sync issues",
@@ -159,6 +167,8 @@ func ConfigAddOptions(prefix string, f *pflag.FlagSet, feedInputEnable bool, fee
resourcemanager.ConfigAddOptions(prefix+".resource-mgmt", f)
BlockMetadataFetcherConfigAddOptions(prefix+".block-metadata-fetcher", f)
ConsensusExecutionSyncerConfigAddOptions(prefix+".consensus-execution-syncer", f)
rpcserver.ConfigAddOptions(prefix+".rpc-server", "consensus", f)
rpcclient.RPCClientAddOptions(prefix+".execution-rpc-client", f, &ConfigDefault.ExecutionRPCClient)
}

var ConfigDefault = Config{
Expand All @@ -183,6 +193,15 @@ var ConfigDefault = Config{
Maintenance: DefaultMaintenanceConfig,
ConsensusExecutionSyncer: DefaultConsensusExecutionSyncerConfig,
SnapSyncTest: DefaultSnapSyncConfig,
RPCServer: rpcserver.DefaultConfig,
ExecutionRPCClient: rpcclient.ClientConfig{
URL: "",
JWTSecret: "",
Retries: 3,
RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused",
ArgLogLimit: 2048,
WebsocketMessageSizeLimit: 256 * 1024 * 1024,
},
}

func ConfigDefaultL1Test() *Config {
@@ -526,6 +545,7 @@ func getBroadcastClients(
func getBlockMetadataFetcher(
ctx context.Context,
configFetcher ConfigFetcher,
l2Config *params.ChainConfig,
arbDb ethdb.Database,
exec execution.ExecutionClient,
expectedChainId uint64,
@@ -535,7 +555,7 @@
var blockMetadataFetcher *BlockMetadataFetcher
if config.BlockMetadataFetcher.Enable {
var err error
blockMetadataFetcher, err = NewBlockMetadataFetcher(ctx, config.BlockMetadataFetcher, arbDb, exec, config.TransactionStreamer.TrackBlockMetadataFrom, expectedChainId)
blockMetadataFetcher, err = NewBlockMetadataFetcher(ctx, config.BlockMetadataFetcher, arbDb, l2Config.ArbitrumChainParams.GenesisBlockNum, exec, config.TransactionStreamer.TrackBlockMetadataFrom, expectedChainId)
if err != nil {
return nil, err
}
@@ -1167,7 +1187,7 @@ func createNodeImpl(
return nil, err
}

blockMetadataFetcher, err := getBlockMetadataFetcher(ctx, configFetcher, arbDb, executionClient, l2Config.ChainID.Uint64())
blockMetadataFetcher, err := getBlockMetadataFetcher(ctx, configFetcher, l2Config, arbDb, executionClient, l2Config.ChainID.Uint64())
if err != nil {
return nil, err
}
@@ -1324,10 +1344,20 @@ func registerAPIs(currentNode *Node, stack *node.Node) {
Public: false,
})
}
config := currentNode.configFetcher.Get()
if config.RPCServer.Enable {
apis = append(apis, rpc.API{
Namespace: consensus.RPCNamespace,
Version: "1.0",
Service: consensusrpcserver.NewConsensusRPCServer(currentNode),
Public: config.RPCServer.Public,
Authenticated: config.RPCServer.Authenticated,
})
}
stack.RegisterAPIs(apis)
}

func CreateNodeExecutionClient(
func CreateConsensusNodeConnectedWithSimpleExecutionClient(
ctx context.Context,
stack *node.Node,
executionClient execution.ExecutionClient,
@@ -1344,6 +1374,10 @@
blobReader daprovider.BlobReader,
latestWasmModuleRoot common.Hash,
) (*Node, error) {
if configFetcher.Get().ExecutionRPCClient.URL != "" {
(Contributor):
URL still being used here instead of the directConnect var.

(Contributor):
The directConnect var doesn't apply here, because we aren't creating an execution node here. This check is just for added safety.

(Contributor):
CreateConsensusNodeConnectedWithSimpleExecutionClient should decide whether RPC must be used through the useRPC var, in the same way CreateConsensusNodeConnectedWithFullExecutionClient does.

I don't understand what you meant by "This check is just for added safety".

@ganeshvanahalli (Contributor), Nov 13, 2025:
If nodeConfig.ExecutionNode is false (which is when nitro.go calls this function), useRPC will be true, implying we will have to connect via the URL anyway, so the extra useRPC check is not needed?

In which case one can choose to connect to an external execution client via RPC, or not run with an execution client at all by keeping the URL empty.

@diegoximenes (Contributor), Nov 14, 2025:
CreateConsensusNodeConnectedWithSimpleExecutionClient is also used by system tests, which always create an ExecutionNode and can choose whether or not to use RPC.

Relying on the URL here is correct 🙂, but what I am advocating for is not code correctness; it is improving the code, the Nitro config, maintainability, and readability.

We can solely rely on URLs.

We don't need the nodeConfig.ExecutionNode and nodeConfig.ConsensusExecutionInSameProcessUseRPC flags.
Example:

  • Both --execution-rpc-server URL and --execution-rpc-client URL set is analogous to: --execution-node==true and --consensus-execution-in-same-process-use-rpc==true
  • --execution-rpc-server URL not set and --execution-rpc-client URL set is analogous to: --execution-node==false

It is easier for node operators to understand whether an Execution node will be run by checking a single config flag, i.e., nodeConfig.ExecutionNode, instead of reasoning about the 4 combinations in which (execution-rpc-server.URL, execution-rpc-client.URL) can be set or not set.

We don't need the useRPC var.
Given that we validate that the URLs are set or not set in accordance with nodeConfig.ExecutionNode and nodeConfig.ConsensusExecutionInSameProcessUseRPC, we can rely on the URL to make a decision here.
But when reading the CreateConsensusNodeConnectedWithSimpleExecutionClient func, if I want to check whether it makes sense to rely on this RPC URL config here, I need to check where (and if) those configs are validated, and the calls to those validation procedures are not close to where CreateConsensusNodeConnectedWithSimpleExecutionClient is called.
With a useRPC flag, the behavior this func should have is clearer.
It is also more brittle: if we change the validation of those configs, we will need to remember to update all places where these RPC URL checks are used, instead of changing a single place where the useRPC var is computed.

WDYT?

(Contributor):
Agreed, fixed it.

execConfigFetcher := func() *rpcclient.ClientConfig { return &configFetcher.Get().ExecutionRPCClient }
executionClient = executionrpcclient.NewExecutionRPCClient(execConfigFetcher, nil)
}
if executionClient == nil {
return nil, errors.New("execution client must be non-nil")
}
@@ -1355,13 +1389,10 @@
return currentNode, nil
}

func CreateNodeFullExecutionClient(
func CreateConsensusNodeConnectedWithFullExecutionClient(
ctx context.Context,
stack *node.Node,
executionClient execution.ExecutionClient,
executionSequencer execution.ExecutionSequencer,
executionRecorder execution.ExecutionRecorder,
arbOSVersionGetter execution.ArbOSVersionGetter,
fullExecutionClient execution.FullExecutionClient,
arbDb ethdb.Database,
configFetcher ConfigFetcher,
l2Config *params.ChainConfig,
@@ -1375,10 +1406,17 @@
blobReader daprovider.BlobReader,
latestWasmModuleRoot common.Hash,
) (*Node, error) {
if (executionClient == nil) || (executionSequencer == nil) || (executionRecorder == nil) || (arbOSVersionGetter == nil) {
return nil, errors.New("execution client, sequencer, recorder, and ArbOS version getter must be non-nil")
if fullExecutionClient == nil {
return nil, errors.New("full execution client must be non-nil")
}
var executionClient execution.ExecutionClient
if configFetcher.Get().ExecutionRPCClient.URL != "" {
(Contributor):
How about being more explicit here, and relying on ExecutionNode and ConsensusExecutionUseRPC instead of ExecutionRPCClient.URL?
Actually, how about renaming ConsensusExecutionUseRPC to something like ConsensusExecutionUseRPCWhenBothAreRunInTheSameProcess, and creating a helper function or variable, like func ConsensusExecutionUseRPC(ConsensusExecutionUseRPCWhenBothAreRunningInTheSameProcess, ExecutionNode), that is used in situations like this one?

@diegoximenes (Contributor), Nov 5, 2025:
In cmd/nitro.go, nodeConfig.ExecutionNode is used to decide whether the gethexec.ExecutionNode will be created.
Here, the decision whether gethexec.ExecutionNode, which is provided as fullExecutionClient, is going to be used takes into consideration another config, ExecutionRPCClient.URL.
However, if nodeConfig.ExecutionNode is set to true and nodeConfig.ConsensusExecutionUseRPC is set to false, then it doesn't matter whether the RPC URLs are set.
I mean, if you first validate that, making sure that in this configuration scenario the RPC URLs are not set, as you are already doing, then the code is correct 🙂, but it is hard to read.
As an example, when reading the CreateConsensusNodeConnectedWithFullExecutionClient func, if I want to check whether it makes sense to rely on this RPC URL config here, I need to check where (and if) those configs are validated, and the calls to those validation procedures are not close to where CreateConsensusNodeConnectedWithFullExecutionClient is called.
It is also more brittle: if we change the validation of those configs, we will need to remember to update all places where these RPC URL checks are used.

That is why I proposed having a unified way to verify whether the RPC client should be used, one that takes into consideration nodeConfig.ConsensusExecutionUseRPC and nodeConfig.ExecutionNode, possibly through a function, or by creating a variable and passing it around, instead of relying on RPC URLs.

Also, the ConsensusExecutionUseRPC name could indicate more clearly that it is only used when NodeConfig.ExecutionNode is set to true, which is why I suggested renaming it to something like ConsensusExecutionUseRPCWhenBothAreRunInTheSameProcess.
I mean, semantically, if NodeConfig.ExecutionNode is set to false, then it makes sense to enforce that ConsensusExecutionUseRPC is true.

WDYT?

execConfigFetcher := func() *rpcclient.ClientConfig { return &configFetcher.Get().ExecutionRPCClient }
executionClient = executionrpcclient.NewExecutionRPCClient(execConfigFetcher, nil)
} else {
executionClient = fullExecutionClient
}
currentNode, err := createNodeImpl(ctx, stack, executionClient, executionSequencer, executionRecorder, arbOSVersionGetter, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot)
currentNode, err := createNodeImpl(ctx, stack, executionClient, fullExecutionClient, fullExecutionClient, fullExecutionClient, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader, latestWasmModuleRoot)
if err != nil {
return nil, err
}
@@ -1387,27 +1425,12 @@
}

func (n *Node) Start(ctx context.Context) error {
execClient, ok := n.ExecutionClient.(*gethexec.ExecutionNode)
if !ok {
execClient = nil
}
if execClient != nil {
err := execClient.Initialize(ctx)
if err != nil {
return fmt.Errorf("error initializing exec client: %w", err)
var err error
if execRPCClient, ok := n.ExecutionClient.(*executionrpcclient.ExecutionRPCClient); ok {
if err = execRPCClient.Start(ctx); err != nil {
return fmt.Errorf("error starting exec rpc client: %w", err)
}
}
err := n.Stack.Start()
if err != nil {
return fmt.Errorf("error starting geth stack: %w", err)
}
if execClient != nil {
execClient.SetConsensusClient(n)
}
err = n.ExecutionClient.Start(ctx)
if err != nil {
return fmt.Errorf("error starting exec client: %w", err)
}
if n.BlobReader != nil {
err = n.BlobReader.Initialize(ctx)
if err != nil {
@@ -1516,7 +1539,10 @@ func (n *Node) Start(ctx context.Context) error {
}()
}
if n.blockMetadataFetcher != nil {
n.blockMetadataFetcher.Start(ctx)
err = n.blockMetadataFetcher.Start(ctx)
if err != nil {
return fmt.Errorf("error starting block metadata fetcher: %w", err)
}
}
if n.configFetcher != nil {
n.configFetcher.Start(ctx)
@@ -1594,16 +1620,15 @@ func (n *Node) StopAndWait() {
n.providerServerCloseFn()
}
if n.ExecutionClient != nil {
n.ExecutionClient.StopAndWait()
}
if err := n.Stack.Close(); err != nil {
log.Error("error on stack close", "err", err)
if _, ok := n.ExecutionClient.(*executionrpcclient.ExecutionRPCClient); ok {
n.ExecutionClient.StopAndWait()
}
}
}

func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[execution.InboxBatch] {
func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) containers.PromiseInterface[consensus.InboxBatch] {
batchNum, found, err := n.InboxTracker.FindInboxBatchContainingMessage(message)
inboxBatch := execution.InboxBatch{
inboxBatch := consensus.InboxBatch{
BatchNum: batchNum,
Found: found,
}
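To illustrate the helper discussed in the review threads above, here is a possible shape for a single decision point that replaces direct ExecutionRPCClient.URL checks; the names are hypothetical and this is not necessarily what the PR implements:

package arbnode

// shouldUseExecutionRPC centralizes the decision of whether consensus talks to
// execution over JSON-RPC: either no execution node runs in this process, or the
// operator explicitly asked for RPC between the two components in the same process.
func shouldUseExecutionRPC(executionNodeInProcess, consensusExecutionInSameProcessUseRPC bool) bool {
	if !executionNodeInProcess {
		return true
	}
	return consensusExecutionInSameProcessUseRPC
}

CreateConsensusNodeConnectedWithSimpleExecutionClient and CreateConsensusNodeConnectedWithFullExecutionClient could then branch on this one value instead of inspecting ExecutionRPCClient.URL, which addresses the readability and brittleness concerns raised above.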