Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 12 additions & 4 deletions cmd/nitro/nitro.go
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,7 @@ func mainImpl() int {
chainDb,
l2BlockChain,
l1Client,
func() *gethexec.Config { return &liveNodeConfig.Get().Execution },
&ExecutionNodeConfigFetcher{liveNodeConfig},
new(big.Int).SetUint64(nodeConfig.ParentChain.ID),
liveNodeConfig.Get().Node.TransactionStreamer.SyncTillBlock,
)
Expand All @@ -553,7 +553,7 @@ func mainImpl() int {
execNode,
execNode,
arbDb,
&NodeConfigFetcher{liveNodeConfig},
&ConsensusNodeConfigFetcher{liveNodeConfig},
l2BlockChain.Config(),
l1Client,
&rollupAddrs,
Expand Down Expand Up @@ -1087,14 +1087,22 @@ func initReorg(initConfig conf.InitConfig, chainConfig *params.ChainConfig, inbo
return inboxTracker.ReorgBatchesTo(batchCount)
}

type NodeConfigFetcher struct {
type ConsensusNodeConfigFetcher struct {
*genericconf.LiveConfig[*NodeConfig]
}

func (f *NodeConfigFetcher) Get() *arbnode.Config {
func (f *ConsensusNodeConfigFetcher) Get() *arbnode.Config {
return &f.LiveConfig.Get().Node
}

type ExecutionNodeConfigFetcher struct {
*genericconf.LiveConfig[*NodeConfig]
}

func (f *ExecutionNodeConfigFetcher) Get() *gethexec.Config {
return &f.LiveConfig.Get().Execution
}

func checkWasmModuleRootCompatibility(ctx context.Context, wasmConfig valnode.WasmConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses) error {
// Fetch current on-chain WASM module root
rollupUserLogic, err := rollupgen.NewRollupUserLogic(rollupAddrs.Rollup, l1Client)
Expand Down
24 changes: 13 additions & 11 deletions execution/gethexec/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,9 @@ var ConfigDefault = Config{
ExposeMultiGas: false,
}

type ConfigFetcher func() *Config
type ConfigFetcher interface {
Get() *Config
}

type ExecutionNode struct {
ChainDB ethdb.Database
Expand All @@ -227,7 +229,7 @@ type ExecutionNode struct {
TxPreChecker *TxPreChecker
TxPublisher TransactionPublisher
ExpressLaneService *expressLaneService
ConfigFetcher ConfigFetcher
configFetcher ConfigFetcher
SyncMonitor *SyncMonitor
ParentChainReader *headerreader.HeaderReader
ClassicOutbox *ClassicOutboxRetriever
Expand All @@ -245,7 +247,7 @@ func CreateExecutionNode(
parentChainID *big.Int,
syncTillBlock uint64,
) (*ExecutionNode, error) {
config := configFetcher()
config := configFetcher.Get()
execEngine, err := NewExecutionEngine(l2BlockChain, syncTillBlock, config.ExposeMultiGas)
if config.EnablePrefetchBlock {
execEngine.EnablePrefetchBlock()
Expand All @@ -263,7 +265,7 @@ func CreateExecutionNode(
var parentChainReader *headerreader.HeaderReader
if l1client != nil && !reflect.ValueOf(l1client).IsNil() {
arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys)
parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys)
if err != nil {
return nil, err
}
Expand All @@ -272,7 +274,7 @@ func CreateExecutionNode(
}

if config.Sequencer.Enable {
seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer }
seqConfigFetcher := func() *SequencerConfig { return &configFetcher.Get().Sequencer }
sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher, parentChainID)
if err != nil {
return nil, err
Expand All @@ -289,7 +291,7 @@ func CreateExecutionNode(
}
}

txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher().TxPreChecker }
txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher.Get().TxPreChecker }

txPreChecker := NewTxPreChecker(txPublisher, l2BlockChain, txprecheckConfigFetcher)
txPublisher = txPreChecker
Expand Down Expand Up @@ -384,7 +386,7 @@ func CreateExecutionNode(
Sequencer: sequencer,
TxPreChecker: txPreChecker,
TxPublisher: txPublisher,
ConfigFetcher: configFetcher,
configFetcher: configFetcher,
SyncMonitor: syncMon,
ParentChainReader: parentChainReader,
ClassicOutbox: classicOutbox,
Expand All @@ -399,7 +401,7 @@ func (n *ExecutionNode) MarkFeedStart(to arbutil.MessageIndex) containers.Promis
}

func (n *ExecutionNode) Initialize(ctx context.Context) error {
config := n.ConfigFetcher()
config := n.configFetcher.Get()
err := n.ExecEngine.Initialize(config.Caching.StylusLRUCacheCapacity, &config.StylusTarget)
if err != nil {
return fmt.Errorf("error initializing execution engine: %w", err)
Expand Down Expand Up @@ -541,14 +543,14 @@ func (n *ExecutionNode) BlockNumberToMessageIndex(blockNum uint64) containers.Pr
}

func (n *ExecutionNode) ShouldTriggerMaintenance() containers.PromiseInterface[bool] {
return containers.NewReadyPromise(n.ExecEngine.ShouldTriggerMaintenance(n.ConfigFetcher().Caching.TrieTimeLimitBeforeFlushMaintenance), nil)
return containers.NewReadyPromise(n.ExecEngine.ShouldTriggerMaintenance(n.configFetcher.Get().Caching.TrieTimeLimitBeforeFlushMaintenance), nil)
}
func (n *ExecutionNode) MaintenanceStatus() containers.PromiseInterface[*execution.MaintenanceStatus] {
return containers.NewReadyPromise(n.ExecEngine.MaintenanceStatus(), nil)
}

func (n *ExecutionNode) TriggerMaintenance() containers.PromiseInterface[struct{}] {
trieCapLimitBytes := arbmath.SaturatingUMul(uint64(n.ConfigFetcher().Caching.TrieCapLimit), 1024*1024)
trieCapLimitBytes := arbmath.SaturatingUMul(uint64(n.configFetcher.Get().Caching.TrieCapLimit), 1024*1024)
n.ExecEngine.TriggerMaintenance(trieCapLimitBytes)
return containers.NewReadyPromise(struct{}{}, nil)
}
Expand Down Expand Up @@ -577,7 +579,7 @@ func (n *ExecutionNode) SetConsensusSyncData(ctx context.Context, syncData *exec
}

func (n *ExecutionNode) InitializeTimeboost(ctx context.Context, chainConfig *params.ChainConfig) error {
execNodeConfig := n.ConfigFetcher()
execNodeConfig := n.configFetcher.Get()
if execNodeConfig.Sequencer.Timeboost.Enable {
auctionContractAddr := common.HexToAddress(execNodeConfig.Sequencer.Timeboost.AuctionContractAddress)

Expand Down
3 changes: 3 additions & 0 deletions system_tests/batch_poster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -575,6 +575,7 @@ func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) {
// If the delay buffer is disabled, set max delay to zero to force it
CheckBatchCount(t, builder, initialBatchCount+batch)
builder.nodeConfig.BatchPoster.MaxDelay = 0
builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
Comment on lines 577 to +578
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

given that Set always occurs in this pattern: (1) change config (2) set config, we might actually change the fetcher API from Set() to Update(func (*T))

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

for some reason git didn't post my reply to your comment! my bad.
Regarding Update instead of Set: I think Set makes more sense, as suggested in the ticket https://linear.app/offchain-labs/issue/NIT-3815/system-tests-avoid-races-for-tests-changing-config — because what does Update even mean? Does it mean we call func on the existing config var stored in the atomic pointer? Note that we aren't using mutexes here (to make the process of changing configs fast and efficient we use atomic.Pointer), so mutating that config in place would basically mean introducing a race again.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nah, the intention was to perform field reassignments on some temp copy and then swap it in via atomic.Pointer, as we currently do

my motivation was to wrap the two-instruction pattern (modify, then set) within the fetcher API, but maybe it doesn't make that much sense here; waiving

}
// Run batch poster loop again, this one should post a batch
_, err = builder.L2.ConsensusNode.BatchPoster.MaybePostSequencerBatch(ctx)
Expand All @@ -586,6 +587,7 @@ func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) {
CheckBatchCount(t, builder, initialBatchCount+batch+1)
if !delayBufferEnabled {
builder.nodeConfig.BatchPoster.MaxDelay = time.Hour
builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
}
}
}
Expand Down Expand Up @@ -629,6 +631,7 @@ func TestBatchPosterDelayBufferDontForceNonDelayedMessages(t *testing.T) {
builder.L2.ConsensusNode.BatchPoster.StopAndWait() // allow us to modify config and call loop at will
// Set delay to zero to force non-delayed messages
builder.nodeConfig.BatchPoster.MaxDelay = 0
builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
_, err := builder.L2.ConsensusNode.BatchPoster.MaybePostSequencerBatch(ctx)
Require(t, err)
for _, tx := range txs {
Expand Down
16 changes: 7 additions & 9 deletions system_tests/bold_challenge_protocol_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ func testChallengeProtocolBOLD(t *testing.T, useExternalSigner bool, spawnerOpts
rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
l2nodeB.L1Reader,
&evilOpts,
NewFetcherFromConfig(l2nodeConfig),
NewCommonConfigFetcher(l2nodeConfig),
l2nodeB.SyncMonitor,
l1ChainId,
)
Expand Down Expand Up @@ -654,15 +654,14 @@ func createTestNodeOnL1ForBoldProtocol(
AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", "")

parentChainId, err := l1client.ChainID(ctx)
execConfigFetcher := func() *gethexec.Config { return execConfig }
execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher, parentChainId, 0)
execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, NewCommonConfigFetcher(execConfig), parentChainId, 0)
Require(t, err)

Require(t, err)
locator, err := server_common.NewMachineLocator("")
Require(t, err)
currentNode, err = arbnode.CreateNodeFullExecutionClient(
ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client,
ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewCommonConfigFetcher(nodeConfig), l2blockchain.Config(), l1client,
addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, parentChainId,
nil, // Blob reader.
locator.LatestWasmModuleRoot(),
Expand All @@ -684,7 +683,7 @@ func createTestNodeOnL1ForBoldProtocol(
rawdb.NewTable(l2arbDb, storage.StakerPrefix),
currentNode.L1Reader,
dpOpts,
NewFetcherFromConfig(nodeConfig),
NewCommonConfigFetcher(nodeConfig),
currentNode.SyncMonitor,
parentChainId,
)
Expand Down Expand Up @@ -872,14 +871,13 @@ func create2ndNodeWithConfigForBoldProtocol(
l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, nil, nil, initMessage, &execConfig.TxIndexer, 0)
Require(t, err)

execConfigFetcher := func() *gethexec.Config { return execConfig }
l1ChainId, err := l1client.ChainID(ctx)
Require(t, err)
execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher, l1ChainId, 0)
execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, NewCommonConfigFetcher(execConfig), l1ChainId, 0)
Require(t, err)
locator, err := server_common.NewMachineLocator("")
Require(t, err)
l2node, err := arbnode.CreateNodeFullExecutionClient(ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan, l1ChainId, nil /* blob reader */, locator.LatestWasmModuleRoot())
l2node, err := arbnode.CreateNodeFullExecutionClient(ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewCommonConfigFetcher(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan, l1ChainId, nil /* blob reader */, locator.LatestWasmModuleRoot())
Require(t, err)

l2client := ClientForStack(t, l2stack)
Expand All @@ -896,7 +894,7 @@ func create2ndNodeWithConfigForBoldProtocol(
rawdb.NewTable(l2arbDb, storage.StakerPrefix),
l2node.L1Reader,
&evilOpts,
NewFetcherFromConfig(nodeConfig),
NewCommonConfigFetcher(nodeConfig),
l2node.SyncMonitor,
l1ChainId,
)
Expand Down
8 changes: 4 additions & 4 deletions system_tests/bold_l3_support_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ import (

"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
"github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
"github.com/offchainlabs/nitro/bold/challenge-manager"
solimpl "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
challengemanager "github.com/offchainlabs/nitro/bold/challenge-manager"
modes "github.com/offchainlabs/nitro/bold/challenge-manager/types"
"github.com/offchainlabs/nitro/bold/layer2-state-provider"
l2stateprovider "github.com/offchainlabs/nitro/bold/layer2-state-provider"
"github.com/offchainlabs/nitro/bold/util"
"github.com/offchainlabs/nitro/solgen/go/challengeV2gen"
"github.com/offchainlabs/nitro/solgen/go/localgen"
Expand Down Expand Up @@ -251,7 +251,7 @@ func startL3BoldChallengeManager(t *testing.T, ctx context.Context, builder *Nod
rawdb.NewTable(node.ConsensusNode.ArbDB, storage.StakerPrefix),
builder.L3.ConsensusNode.L1Reader,
&txOpts,
NewFetcherFromConfig(builder.nodeConfig),
NewCommonConfigFetcher(builder.nodeConfig),
node.ConsensusNode.SyncMonitor,
builder.L2Info.Signer.ChainID(),
)
Expand Down
10 changes: 5 additions & 5 deletions system_tests/bold_new_challenge_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,12 @@ import (

"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
"github.com/offchainlabs/nitro/bold/chain-abstraction"
"github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
"github.com/offchainlabs/nitro/bold/challenge-manager"
protocol "github.com/offchainlabs/nitro/bold/chain-abstraction"
solimpl "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
challengemanager "github.com/offchainlabs/nitro/bold/challenge-manager"
modes "github.com/offchainlabs/nitro/bold/challenge-manager/types"
"github.com/offchainlabs/nitro/bold/containers/option"
"github.com/offchainlabs/nitro/bold/layer2-state-provider"
l2stateprovider "github.com/offchainlabs/nitro/bold/layer2-state-provider"
"github.com/offchainlabs/nitro/bold/state-commitments/history"
"github.com/offchainlabs/nitro/bold/util"
"github.com/offchainlabs/nitro/solgen/go/challengeV2gen"
Expand Down Expand Up @@ -330,7 +330,7 @@ func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeB
rawdb.NewTable(node.ConsensusNode.ArbDB, storage.StakerPrefix),
node.ConsensusNode.L1Reader,
&txOpts,
NewFetcherFromConfig(builder.nodeConfig),
NewCommonConfigFetcher(builder.nodeConfig),
node.ConsensusNode.SyncMonitor,
builder.L1Info.Signer.ChainID(),
)
Expand Down
Loading
Loading