diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 9560dd73dd..8f9a3272f2 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -528,7 +528,7 @@ func mainImpl() int {
 		chainDb,
 		l2BlockChain,
 		l1Client,
-		func() *gethexec.Config { return &liveNodeConfig.Get().Execution },
+		&ExecutionNodeConfigFetcher{liveNodeConfig},
 		new(big.Int).SetUint64(nodeConfig.ParentChain.ID),
 		liveNodeConfig.Get().Node.TransactionStreamer.SyncTillBlock,
 	)
@@ -553,7 +553,7 @@ func mainImpl() int {
 		execNode,
 		execNode,
 		arbDb,
-		&NodeConfigFetcher{liveNodeConfig},
+		&ConsensusNodeConfigFetcher{liveNodeConfig},
 		l2BlockChain.Config(),
 		l1Client,
 		&rollupAddrs,
@@ -1087,14 +1087,22 @@ func initReorg(initConfig conf.InitConfig, chainConfig *params.ChainConfig, inbo
 	return inboxTracker.ReorgBatchesTo(batchCount)
 }
 
-type NodeConfigFetcher struct {
+type ConsensusNodeConfigFetcher struct {
 	*genericconf.LiveConfig[*NodeConfig]
 }
 
-func (f *NodeConfigFetcher) Get() *arbnode.Config {
+func (f *ConsensusNodeConfigFetcher) Get() *arbnode.Config {
 	return &f.LiveConfig.Get().Node
 }
 
+type ExecutionNodeConfigFetcher struct {
+	*genericconf.LiveConfig[*NodeConfig]
+}
+
+func (f *ExecutionNodeConfigFetcher) Get() *gethexec.Config {
+	return &f.LiveConfig.Get().Execution
+}
+
 func checkWasmModuleRootCompatibility(ctx context.Context, wasmConfig valnode.WasmConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses) error {
 	// Fetch current on-chain WASM module root
 	rollupUserLogic, err := rollupgen.NewRollupUserLogic(rollupAddrs.Rollup, l1Client)
diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go
index 2359388add..f05ab3eba7 100644
--- a/execution/gethexec/node.go
+++ b/execution/gethexec/node.go
@@ -214,7 +214,9 @@ var ConfigDefault = Config{
 	ExposeMultiGas: false,
 }
 
-type ConfigFetcher func() *Config
+type ConfigFetcher interface {
+	Get() *Config
+}
 
 type ExecutionNode struct {
 	ChainDB ethdb.Database
@@ -227,7 +229,7 @@ type ExecutionNode struct {
 	TxPreChecker       *TxPreChecker
 	TxPublisher        TransactionPublisher
 	ExpressLaneService *expressLaneService
-	ConfigFetcher      ConfigFetcher
+	configFetcher      ConfigFetcher
 	SyncMonitor        *SyncMonitor
 	ParentChainReader  *headerreader.HeaderReader
 	ClassicOutbox      *ClassicOutboxRetriever
@@ -245,7 +247,7 @@ func CreateExecutionNode(
 	parentChainID *big.Int,
 	syncTillBlock uint64,
 ) (*ExecutionNode, error) {
-	config := configFetcher()
+	config := configFetcher.Get()
 	execEngine, err := NewExecutionEngine(l2BlockChain, syncTillBlock, config.ExposeMultiGas)
 	if config.EnablePrefetchBlock {
 		execEngine.EnablePrefetchBlock()
@@ -263,7 +265,7 @@ func CreateExecutionNode(
 	var parentChainReader *headerreader.HeaderReader
 	if l1client != nil && !reflect.ValueOf(l1client).IsNil() {
 		arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
-		parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys)
+		parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys)
 		if err != nil {
 			return nil, err
 		}
@@ -272,7 +274,7 @@ func CreateExecutionNode(
 	}
 
 	if config.Sequencer.Enable {
-		seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer }
+		seqConfigFetcher := func() *SequencerConfig { return &configFetcher.Get().Sequencer }
 		sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher, parentChainID)
 		if err != nil {
 			return nil, err
@@ -289,7 +291,7 @@ func CreateExecutionNode(
 		}
 	}
 
-	txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher().TxPreChecker }
+	txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher.Get().TxPreChecker }
 	txPreChecker := NewTxPreChecker(txPublisher, l2BlockChain, txprecheckConfigFetcher)
 	txPublisher = txPreChecker
@@ -384,7 +386,7 @@ func CreateExecutionNode(
 		Sequencer:         sequencer,
 		TxPreChecker:      txPreChecker,
 		TxPublisher:       txPublisher,
-		ConfigFetcher:     configFetcher,
+		configFetcher:     configFetcher,
 		SyncMonitor:       syncMon,
 		ParentChainReader: parentChainReader,
 		ClassicOutbox:     classicOutbox,
@@ -399,7 +401,7 @@ func (n *ExecutionNode) MarkFeedStart(to arbutil.MessageIndex) containers.Promis
 }
 
 func (n *ExecutionNode) Initialize(ctx context.Context) error {
-	config := n.ConfigFetcher()
+	config := n.configFetcher.Get()
 	err := n.ExecEngine.Initialize(config.Caching.StylusLRUCacheCapacity, &config.StylusTarget)
 	if err != nil {
 		return fmt.Errorf("error initializing execution engine: %w", err)
 	}
@@ -541,14 +543,14 @@ func (n *ExecutionNode) BlockNumberToMessageIndex(blockNum uint64) containers.Pr
 }
 
 func (n *ExecutionNode) ShouldTriggerMaintenance() containers.PromiseInterface[bool] {
-	return containers.NewReadyPromise(n.ExecEngine.ShouldTriggerMaintenance(n.ConfigFetcher().Caching.TrieTimeLimitBeforeFlushMaintenance), nil)
+	return containers.NewReadyPromise(n.ExecEngine.ShouldTriggerMaintenance(n.configFetcher.Get().Caching.TrieTimeLimitBeforeFlushMaintenance), nil)
 }
 
 func (n *ExecutionNode) MaintenanceStatus() containers.PromiseInterface[*execution.MaintenanceStatus] {
 	return containers.NewReadyPromise(n.ExecEngine.MaintenanceStatus(), nil)
 }
 
 func (n *ExecutionNode) TriggerMaintenance() containers.PromiseInterface[struct{}] {
-	trieCapLimitBytes := arbmath.SaturatingUMul(uint64(n.ConfigFetcher().Caching.TrieCapLimit), 1024*1024)
+	trieCapLimitBytes := arbmath.SaturatingUMul(uint64(n.configFetcher.Get().Caching.TrieCapLimit), 1024*1024)
 	n.ExecEngine.TriggerMaintenance(trieCapLimitBytes)
 	return containers.NewReadyPromise(struct{}{}, nil)
 }
@@ -577,7 +579,7 @@ func (n *ExecutionNode) SetConsensusSyncData(ctx context.Context, syncData *exec
 }
 
 func (n *ExecutionNode) InitializeTimeboost(ctx context.Context, chainConfig *params.ChainConfig) error {
-	execNodeConfig := n.ConfigFetcher()
+	execNodeConfig := n.configFetcher.Get()
 	if execNodeConfig.Sequencer.Timeboost.Enable {
 		auctionContractAddr := common.HexToAddress(execNodeConfig.Sequencer.Timeboost.AuctionContractAddress)
diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go
index 29cffde5f6..c314eb130e 100644
--- a/system_tests/batch_poster_test.go
+++ b/system_tests/batch_poster_test.go
@@ -575,6 +575,7 @@ func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) {
 			// If the delay buffer is disabled, set max delay to zero to force it
 			CheckBatchCount(t, builder, initialBatchCount+batch)
 			builder.nodeConfig.BatchPoster.MaxDelay = 0
+			builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
 		}
 		// Run batch poster loop again, this one should post a batch
 		_, err = builder.L2.ConsensusNode.BatchPoster.MaybePostSequencerBatch(ctx)
@@ -586,6 +587,7 @@ func testBatchPosterDelayBuffer(t *testing.T, delayBufferEnabled bool) {
 		CheckBatchCount(t, builder, initialBatchCount+batch+1)
 		if !delayBufferEnabled {
 			builder.nodeConfig.BatchPoster.MaxDelay = time.Hour
+			builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
 		}
 	}
 }
@@ -629,6 +631,7 @@ func TestBatchPosterDelayBufferDontForceNonDelayedMessages(t *testing.T) {
 	builder.L2.ConsensusNode.BatchPoster.StopAndWait() // allow us to modify config and call loop at will
 	// Set delay to zero to force non-delayed messages
 	builder.nodeConfig.BatchPoster.MaxDelay = 0
+	builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
 	_, err := builder.L2.ConsensusNode.BatchPoster.MaybePostSequencerBatch(ctx)
 	Require(t, err)
 	for _, tx := range txs {
diff --git a/system_tests/bold_challenge_protocol_test.go b/system_tests/bold_challenge_protocol_test.go
index 20bfc24a13..73c6a0f127 100644
--- a/system_tests/bold_challenge_protocol_test.go
+++ b/system_tests/bold_challenge_protocol_test.go
@@ -265,7 +265,7 @@ func testChallengeProtocolBOLD(t *testing.T, useExternalSigner bool, spawnerOpts
 		rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
 		l2nodeB.L1Reader,
 		&evilOpts,
-		NewFetcherFromConfig(l2nodeConfig),
+		NewCommonConfigFetcher(l2nodeConfig),
 		l2nodeB.SyncMonitor,
 		l1ChainId,
 	)
@@ -654,15 +654,14 @@ func createTestNodeOnL1ForBoldProtocol(
 	AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", "")
 
 	parentChainId, err := l1client.ChainID(ctx)
-	execConfigFetcher := func() *gethexec.Config { return execConfig }
-	execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher, parentChainId, 0)
+	execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, NewCommonConfigFetcher(execConfig), parentChainId, 0)
 	Require(t, err)
 	Require(t, err)
 
 	locator, err := server_common.NewMachineLocator("")
 	Require(t, err)
 	currentNode, err = arbnode.CreateNodeFullExecutionClient(
-		ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client,
+		ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewCommonConfigFetcher(nodeConfig), l2blockchain.Config(), l1client,
 		addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, parentChainId,
 		nil, // Blob reader.
 		locator.LatestWasmModuleRoot(),
@@ -684,7 +683,7 @@ func createTestNodeOnL1ForBoldProtocol(
 		rawdb.NewTable(l2arbDb, storage.StakerPrefix),
 		currentNode.L1Reader,
 		dpOpts,
-		NewFetcherFromConfig(nodeConfig),
+		NewCommonConfigFetcher(nodeConfig),
 		currentNode.SyncMonitor,
 		parentChainId,
 	)
@@ -872,14 +871,13 @@ func create2ndNodeWithConfigForBoldProtocol(
 	l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, nil, nil, initMessage, &execConfig.TxIndexer, 0)
 	Require(t, err)
 
-	execConfigFetcher := func() *gethexec.Config { return execConfig }
 	l1ChainId, err := l1client.ChainID(ctx)
 	Require(t, err)
-	execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher, l1ChainId, 0)
+	execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, NewCommonConfigFetcher(execConfig), l1ChainId, 0)
 	Require(t, err)
 	locator, err := server_common.NewMachineLocator("")
 	Require(t, err)
-	l2node, err := arbnode.CreateNodeFullExecutionClient(ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan, l1ChainId, nil /* blob reader */, locator.LatestWasmModuleRoot())
+	l2node, err := arbnode.CreateNodeFullExecutionClient(ctx, l2stack, execNode, execNode, execNode, execNode, l2arbDb, NewCommonConfigFetcher(nodeConfig), l2blockchain.Config(), l1client, addresses, &txOpts, &txOpts, dataSigner, fatalErrChan, l1ChainId, nil /* blob reader */, locator.LatestWasmModuleRoot())
 	Require(t, err)
 
 	l2client := ClientForStack(t, l2stack)
@@ -896,7 +894,7 @@ func create2ndNodeWithConfigForBoldProtocol(
 		rawdb.NewTable(l2arbDb, storage.StakerPrefix),
 		l2node.L1Reader,
 		&evilOpts,
-		NewFetcherFromConfig(nodeConfig),
+		NewCommonConfigFetcher(nodeConfig),
 		l2node.SyncMonitor,
 		l1ChainId,
 	)
diff --git a/system_tests/bold_l3_support_test.go b/system_tests/bold_l3_support_test.go
index 0f3ede61bb..982cca6653 100644
--- a/system_tests/bold_l3_support_test.go
+++ b/system_tests/bold_l3_support_test.go
@@ -21,10 +21,10 @@ import (
 	"github.com/offchainlabs/nitro/arbnode"
 	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
-	"github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
-	"github.com/offchainlabs/nitro/bold/challenge-manager"
+	solimpl "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
+	challengemanager "github.com/offchainlabs/nitro/bold/challenge-manager"
 	modes "github.com/offchainlabs/nitro/bold/challenge-manager/types"
-	"github.com/offchainlabs/nitro/bold/layer2-state-provider"
+	l2stateprovider "github.com/offchainlabs/nitro/bold/layer2-state-provider"
 	"github.com/offchainlabs/nitro/bold/util"
 	"github.com/offchainlabs/nitro/solgen/go/challengeV2gen"
 	"github.com/offchainlabs/nitro/solgen/go/localgen"
@@ -251,7 +251,7 @@ func startL3BoldChallengeManager(t *testing.T, ctx context.Context, builder *Nod
 		rawdb.NewTable(node.ConsensusNode.ArbDB, storage.StakerPrefix),
 		builder.L3.ConsensusNode.L1Reader,
 		&txOpts,
-		NewFetcherFromConfig(builder.nodeConfig),
+		NewCommonConfigFetcher(builder.nodeConfig),
 		node.ConsensusNode.SyncMonitor,
 		builder.L2Info.Signer.ChainID(),
 	)
diff --git a/system_tests/bold_new_challenge_test.go b/system_tests/bold_new_challenge_test.go
index 6a55dd0d44..7ef039d129 100644
--- a/system_tests/bold_new_challenge_test.go
+++ b/system_tests/bold_new_challenge_test.go
@@ -21,12 +21,12 @@ import (
 	"github.com/offchainlabs/nitro/arbnode"
 	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
-	"github.com/offchainlabs/nitro/bold/chain-abstraction"
-	"github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
-	"github.com/offchainlabs/nitro/bold/challenge-manager"
+	protocol "github.com/offchainlabs/nitro/bold/chain-abstraction"
+	solimpl "github.com/offchainlabs/nitro/bold/chain-abstraction/sol-implementation"
+	challengemanager "github.com/offchainlabs/nitro/bold/challenge-manager"
 	modes "github.com/offchainlabs/nitro/bold/challenge-manager/types"
 	"github.com/offchainlabs/nitro/bold/containers/option"
-	"github.com/offchainlabs/nitro/bold/layer2-state-provider"
+	l2stateprovider "github.com/offchainlabs/nitro/bold/layer2-state-provider"
 	"github.com/offchainlabs/nitro/bold/state-commitments/history"
 	"github.com/offchainlabs/nitro/bold/util"
 	"github.com/offchainlabs/nitro/solgen/go/challengeV2gen"
@@ -330,7 +330,7 @@ func startBoldChallengeManager(t *testing.T, ctx context.Context, builder *NodeB
 		rawdb.NewTable(node.ConsensusNode.ArbDB, storage.StakerPrefix),
 		node.ConsensusNode.L1Reader,
 		&txOpts,
-		NewFetcherFromConfig(builder.nodeConfig),
+		NewCommonConfigFetcher(builder.nodeConfig),
 		node.ConsensusNode.SyncMonitor,
 		builder.L1Info.Signer.ChainID(),
 	)
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index ed3d5a6441..79e7077fb7 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"encoding/gob"
 	"encoding/hex"
 	"encoding/json"
 	"flag"
@@ -106,13 +107,15 @@ type SecondNodeParams struct {
 }
 
 type TestClient struct {
-	ctx           context.Context
-	Client        *ethclient.Client
-	L1Backend     *eth.Ethereum
-	Stack         *node.Node
-	ConsensusNode *arbnode.Node
-	ExecNode      *gethexec.ExecutionNode
-	ClientWrapper *ClientWrapper
+	ctx                    context.Context
+	Client                 *ethclient.Client
+	L1Backend              *eth.Ethereum
+	Stack                  *node.Node
+	ConsensusNode          *arbnode.Node
+	ExecNode               *gethexec.ExecutionNode
+	ClientWrapper          *ClientWrapper
+	ConsensusConfigFetcher ConfigFetcher[arbnode.Config]
+	ExecutionConfigFetcher ConfigFetcher[gethexec.Config]
 
 	// having cleanup() field makes cleanup customizable from default cleanup methods after calling build
 	cleanup func()
@@ -669,19 +672,20 @@ func buildOnParentChain(
 	AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
 
-	Require(t, execConfig.Validate())
-	execConfigToBeUsedInConfigFetcher := execConfig
-	execConfigFetcher := func() *gethexec.Config { return execConfigToBeUsedInConfigFetcher }
+	execConfigFetcher := NewCommonConfigFetcher(execConfig)
 	execNode, err := gethexec.CreateExecutionNode(ctx, chainTestClient.Stack, chainDb, blockchain, parentChainTestClient.Client, execConfigFetcher, parentChainId, 0)
 	Require(t, err)
+	chainTestClient.ExecutionConfigFetcher = execConfigFetcher
 
 	fatalErrChan := make(chan error, 10)
 	locator, err := server_common.NewMachineLocator(valnodeConfig.Wasm.RootPath)
 	Require(t, err)
+	consensusConfigFetcher := NewCommonConfigFetcher(nodeConfig)
 	chainTestClient.ConsensusNode, err = arbnode.CreateNodeFullExecutionClient(
-		ctx, chainTestClient.Stack, execNode, execNode, execNode, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), parentChainTestClient.Client,
+		ctx, chainTestClient.Stack, execNode, execNode, execNode, execNode, arbDb, consensusConfigFetcher, blockchain.Config(), parentChainTestClient.Client,
 		addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, parentChainId, nil,
 		locator.LatestWasmModuleRoot())
 	Require(t, err)
+	chainTestClient.ConsensusConfigFetcher = consensusConfigFetcher
 
 	err = chainTestClient.ConsensusNode.Start(ctx)
 	Require(t, err)
@@ -815,19 +819,20 @@ func (b *NodeBuilder) BuildL2(t *testing.T) func() {
 	b.L2Info, b.L2.Stack, chainDb, arbDb, blockchain = createNonL1BlockChainWithStackConfig(
 		t, b.L2Info, b.dataDir, b.chainConfig, b.arbOSInit, nil, b.l2StackConfig, b.execConfig)
 
-	Require(t, b.execConfig.Validate())
-	execConfig := b.execConfig
-	execConfigFetcher := func() *gethexec.Config { return execConfig }
+	execConfigFetcher := NewCommonConfigFetcher(b.execConfig)
 	execNode, err := gethexec.CreateExecutionNode(b.ctx, b.L2.Stack, chainDb, blockchain, nil, execConfigFetcher, big.NewInt(1337), 0)
 	Require(t, err)
+	b.L2.ExecutionConfigFetcher = execConfigFetcher
 
 	fatalErrChan := make(chan error, 10)
 	locator, err := server_common.NewMachineLocator(b.valnodeConfig.Wasm.RootPath)
 	Require(t, err)
+	consensusConfigFetcher := NewCommonConfigFetcher(b.nodeConfig)
 	b.L2.ConsensusNode, err = arbnode.CreateNodeFullExecutionClient(
-		b.ctx, b.L2.Stack, execNode, execNode, execNode, execNode, arbDb, NewFetcherFromConfig(b.nodeConfig), blockchain.Config(),
+		b.ctx, b.L2.Stack, execNode, execNode, execNode, execNode, arbDb, consensusConfigFetcher, blockchain.Config(),
 		nil, nil, nil, nil, nil, fatalErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
 	Require(t, err)
+	b.L2.ConsensusConfigFetcher = consensusConfigFetcher
 
 	// Give the node an init message
 	err = b.L2.ConsensusNode.TxStreamer.AddFakeInitMessage()
@@ -871,7 +876,7 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) {
 
 	l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.arbOSInit, b.initMessage, b.l2StackConfig, b.execConfig)
 
-	execConfigFetcher := func() *gethexec.Config { return b.execConfig }
+	execConfigFetcher := NewCommonConfigFetcher(b.execConfig)
 	execNode, err := gethexec.CreateExecutionNode(b.ctx, stack, chainDb, blockchain, nil, execConfigFetcher, big.NewInt(1337), 0)
 	Require(t, err)
@@ -890,7 +895,8 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) {
 		dataSigner = signature.DataSignerFromPrivateKey(b.L1Info.GetInfoWithPrivKey("Sequencer").PrivateKey)
 		l1Client = b.L1.Client
 	}
-	currentNode, err := arbnode.CreateNodeFullExecutionClient(b.ctx, stack, execNode, execNode, execNode, execNode, arbDb, NewFetcherFromConfig(b.nodeConfig), blockchain.Config(), l1Client, b.addresses, validatorTxOpts, sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
+	consensusConfigFetcher := NewCommonConfigFetcher(b.nodeConfig)
+	currentNode, err := arbnode.CreateNodeFullExecutionClient(b.ctx, stack, execNode, execNode, execNode, execNode, arbDb, consensusConfigFetcher, blockchain.Config(), l1Client, b.addresses, validatorTxOpts, sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
 	Require(t, err)
 
 	Require(t, currentNode.Start(b.ctx))
@@ -904,6 +910,8 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) {
 	l2.ExecNode = execNode
 	l2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() }
 	l2.Stack = stack
+	l2.ExecutionConfigFetcher = execConfigFetcher
+	l2.ConsensusConfigFetcher = consensusConfigFetcher
 
 	b.L2 = l2
 	b.L2Info = l2info
@@ -960,7 +968,7 @@ func build2ndNode(
 	}
 
 	testClient := NewTestClient(ctx)
-	testClient.Client, testClient.ConsensusNode =
+	testClient.Client, testClient.ConsensusNode, testClient.ExecutionConfigFetcher, testClient.ConsensusConfigFetcher =
 		Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage, params.useExecutionClientOnly)
 	testClient.ExecNode = getExecNode(t, testClient.ConsensusNode)
 	testClient.cleanup = func() { testClient.ConsensusNode.StopAndWait() }
@@ -1281,30 +1289,57 @@ func (l *lifecycle) Stop() error {
 	return nil
 }
 
-type staticNodeConfigFetcher struct {
-	config *arbnode.Config
+type ConfigFetcher[T any] interface {
+	Set(*T)
+	Get() *T
+	Start(context.Context)
+	StopAndWait()
+	Started() bool
 }
 
-func NewFetcherFromConfig(c *arbnode.Config) *staticNodeConfigFetcher {
-	err := c.Validate()
-	if err != nil {
-		panic("invalid static config: " + err.Error())
-	}
-	return &staticNodeConfigFetcher{c}
+type commonConfigFetcher[T any] struct {
+	config atomic.Pointer[T]
 }
 
-func (c *staticNodeConfigFetcher) Get() *arbnode.Config {
-	return c.config
+func cloneAndValidateConfig[T any](cfg *T) *T {
+	var b bytes.Buffer
+	if err := gob.NewEncoder(&b).Encode(cfg); err != nil {
+		panic("error marshalling config using gob: " + err.Error())
+	}
+	data := b.Bytes()
+	var clonedCfg T
+	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&clonedCfg); err != nil {
+		panic("error unmarshalling bytes using gob into clonedCfg: " + err.Error())
+	}
+	if v, ok := any(&clonedCfg).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			panic("invalid cloned config: " + err.Error())
+		}
+	}
+	return &clonedCfg
 }
 
-func (c *staticNodeConfigFetcher) Start(context.Context) {}
+func NewCommonConfigFetcher[T any](cfg *T) ConfigFetcher[T] {
+	clonedCfg := cloneAndValidateConfig(cfg)
+	fetcher := &commonConfigFetcher[T]{}
+	fetcher.config.Store(clonedCfg)
+	return fetcher
+}
 
-func (c *staticNodeConfigFetcher) StopAndWait() {}
+// Set first clones the cfg and then stores the new config atomically
+func (fetcher *commonConfigFetcher[T]) Set(cfg *T) {
+	clonedCfg := cloneAndValidateConfig(cfg)
+	fetcher.config.Store(clonedCfg)
+}
 
-func (c *staticNodeConfigFetcher) Started() bool {
-	return true
+func (fetcher *commonConfigFetcher[T]) Get() *T {
+	return fetcher.config.Load()
 }
 
+func (fetcher *commonConfigFetcher[T]) Start(context.Context) {}
+func (fetcher *commonConfigFetcher[T]) StopAndWait()           {}
+func (fetcher *commonConfigFetcher[T]) Started() bool          { return true }
+
 func createRedisGroup(ctx context.Context, t *testing.T, streamName string, client redis.UniversalClient) {
 	t.Helper()
 	// Stream name and group name are the same.
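The clone step above is what isolates each test's configuration: `NewCommonConfigFetcher` and `Set` never alias the caller's struct, they store a gob round-tripped, validated copy behind an atomic pointer, so later mutations of `builder.nodeConfig` only become visible once `Set` is called again. A minimal, self-contained sketch of that clone-and-validate pattern, using a hypothetical `DummyConfig` type and returning errors where the test helper panics, might look like:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// DummyConfig stands in for a real node or execution config struct
// (hypothetical; it exists only to exercise the fetcher below).
type DummyConfig struct {
	MaxDelay time.Duration
	Enable   bool
}

// Validate mirrors the optional Validate() error hook the fetcher probes for.
func (c *DummyConfig) Validate() error {
	if c.MaxDelay < 0 {
		return errors.New("MaxDelay must be non-negative")
	}
	return nil
}

// fetcher keeps the latest deep copy behind an atomic pointer so readers
// always see a complete snapshot while a test swaps configs underneath.
type fetcher[T any] struct {
	config atomic.Pointer[T]
}

// clone round-trips the config through gob to obtain an independent copy,
// then runs Validate() if the concrete type provides it.
func clone[T any](cfg *T) (*T, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(cfg); err != nil {
		return nil, err
	}
	out := new(T)
	if err := gob.NewDecoder(&buf).Decode(out); err != nil {
		return nil, err
	}
	if v, ok := any(out).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return nil, err
		}
	}
	return out, nil
}

// Set stores a validated deep copy; Get returns the current snapshot.
func (f *fetcher[T]) Set(cfg *T) error {
	c, err := clone(cfg)
	if err != nil {
		return err
	}
	f.config.Store(c)
	return nil
}

func (f *fetcher[T]) Get() *T { return f.config.Load() }

func main() {
	f := &fetcher[DummyConfig]{}
	base := &DummyConfig{MaxDelay: time.Hour, Enable: true}
	if err := f.Set(base); err != nil {
		panic(err)
	}
	// Mutating the original after Set does not affect the stored snapshot.
	base.MaxDelay = 0
	fmt.Println(f.Get().MaxDelay) // still 1h0m0s
}
```

Because `Get` hands out the stored pointer rather than another copy, readers keep a consistent snapshot for as long as they hold it, and a concurrent `Set` simply publishes a fresh snapshot for subsequent `Get` calls.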
@@ -1749,7 +1784,7 @@ func Create2ndNodeWithConfig(
 	addresses *chaininfo.RollupAddresses,
 	initMessage *arbostypes.ParsedInitMessage,
 	useExecutionClientOnly bool,
-) (*ethclient.Client, *arbnode.Node) {
+) (*ethclient.Client, *arbnode.Node, ConfigFetcher[gethexec.Config], ConfigFetcher[arbnode.Config]) {
 	if nodeConfig == nil {
 		nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
 	}
@@ -1796,18 +1831,18 @@ func Create2ndNodeWithConfig(
 
 	AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
 
-	Require(t, nodeConfig.Validate())
-	configFetcher := func() *gethexec.Config { return execConfig }
-	currentExec, err := gethexec.CreateExecutionNode(ctx, chainStack, chainDb, blockchain, parentChainClient, configFetcher, big.NewInt(1337), 0)
+	execConfigFetcher := NewCommonConfigFetcher(execConfig)
+	currentExec, err := gethexec.CreateExecutionNode(ctx, chainStack, chainDb, blockchain, parentChainClient, execConfigFetcher, big.NewInt(1337), 0)
 	Require(t, err)
 
 	var currentNode *arbnode.Node
 	locator, err := server_common.NewMachineLocator(valnodeConfig.Wasm.RootPath)
 	Require(t, err)
+	consensusConfigFetcher := NewCommonConfigFetcher(nodeConfig)
 	if useExecutionClientOnly {
-		currentNode, err = arbnode.CreateNodeExecutionClient(ctx, chainStack, currentExec, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), parentChainClient, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
+		currentNode, err = arbnode.CreateNodeExecutionClient(ctx, chainStack, currentExec, arbDb, consensusConfigFetcher, blockchain.Config(), parentChainClient, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
 	} else {
-		currentNode, err = arbnode.CreateNodeFullExecutionClient(ctx, chainStack, currentExec, currentExec, currentExec, currentExec, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), parentChainClient, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
+		currentNode, err = arbnode.CreateNodeFullExecutionClient(ctx, chainStack, currentExec, currentExec, currentExec, currentExec, arbDb, consensusConfigFetcher, blockchain.Config(), parentChainClient, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil, locator.LatestWasmModuleRoot())
 	}
 	Require(t, err)
@@ -1818,7 +1853,7 @@ func Create2ndNodeWithConfig(
 
 	StartWatchChanErr(t, ctx, feedErrChan, currentNode)
 
-	return chainClient, currentNode
+	return chainClient, currentNode, execConfigFetcher, consensusConfigFetcher
 }
 
 func GetBalance(t *testing.T, ctx context.Context, client *ethclient.Client, account common.Address) *big.Int {
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index bc479a5dc3..f0c21f59f3 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -364,7 +364,8 @@ func TestDASBatchPosterFallback(t *testing.T) {
 	// Enable the DAP fallback and check the transaction on the second node.
 	// (We don't need to restart the node because of the hot-reload.)
 	builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = false
-	_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+	builder.L2.ConsensusConfigFetcher.Set(builder.nodeConfig)
+	_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*5)
 	Require(t, err)
 	l2balance, err := l2B.Client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
 	Require(t, err)
diff --git a/system_tests/fast_confirm_test.go b/system_tests/fast_confirm_test.go
index 208ebcbad6..b247438a6e 100644
--- a/system_tests/fast_confirm_test.go
+++ b/system_tests/fast_confirm_test.go
@@ -37,7 +37,7 @@ import (
 	"github.com/offchainlabs/nitro/solgen/go/rollup_legacy_gen"
 	"github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen"
 	"github.com/offchainlabs/nitro/staker"
-	"github.com/offchainlabs/nitro/staker/legacy"
+	legacystaker "github.com/offchainlabs/nitro/staker/legacy"
 	"github.com/offchainlabs/nitro/staker/validatorwallet"
 	"github.com/offchainlabs/nitro/util"
 	"github.com/offchainlabs/nitro/validator/server_common"
@@ -227,7 +227,7 @@ func setupFastConfirmation(ctx context.Context, t *testing.T) (*NodeBuilder, *le
 		ctx,
 		rawdb.NewTable(l2node.ArbDB, storage.StakerPrefix),
 		l2node.L1Reader,
-		&l1auth, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()),
+		&l1auth, NewCommonConfigFetcher(arbnode.ConfigDefaultL1NonSequencerTest()),
 		nil,
 		parentChainID,
 	)
@@ -421,7 +421,7 @@ func TestFastConfirmationWithSafe(t *testing.T) {
 		ctx,
 		rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
 		l2nodeA.L1Reader,
-		&l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()),
+		&l1authA, NewCommonConfigFetcher(arbnode.ConfigDefaultL1NonSequencerTest()),
 		nil,
 		parentChainID,
 	)
@@ -506,7 +506,7 @@ func TestFastConfirmationWithSafe(t *testing.T) {
 		ctx,
 		rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
 		l2nodeB.L1Reader,
-		&l1authB, NewFetcherFromConfig(cfg),
+		&l1authB, NewCommonConfigFetcher(cfg),
 		nil,
 		parentChainID,
 	)
diff --git a/system_tests/program_test.go b/system_tests/program_test.go
index 9210545a7f..06cfbb4fd7 100644
--- a/system_tests/program_test.go
+++ b/system_tests/program_test.go
@@ -2204,7 +2204,7 @@ func TestWasmStoreRebuilding(t *testing.T) {
 
 	// Start rebuilding and wait for it to finish
 	log.Info("starting rebuilding of wasm store")
-	execConfig := nodeB.ExecNode.ConfigFetcher()
+	execConfig := builder.execConfig
 	Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, nodeB.ExecNode.ChainDB, execConfig.RPC.MaxRecreateStateDepth, &execConfig.StylusTarget, bc, common.Hash{}, bc.CurrentBlock().Hash()))
 
 	wasmDbAfterRebuild := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore()
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index 78c685be15..6bf40b7cfc 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -159,7 +159,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		ctx,
 		rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
 		l2nodeA.L1Reader,
-		&l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()),
+		&l1authA, NewCommonConfigFetcher(arbnode.ConfigDefaultL1NonSequencerTest()),
 		nil,
 		parentChainID,
 	)
@@ -243,7 +243,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
 		l2nodeB.L1Reader,
 		nil,
-		NewFetcherFromConfig(cfg),
+		NewCommonConfigFetcher(cfg),
 		nil,
 		parentChainID,
 	)
diff --git a/system_tests/timeboost_test.go b/system_tests/timeboost_test.go
index 7cb0e69638..00d13aeade 100644
--- a/system_tests/timeboost_test.go
+++ b/system_tests/timeboost_test.go
@@ -1737,6 +1737,7 @@ func setupExpressLaneAuction(
 	expressLaneTracker.Start(ctx)
 
 	builderSeq.execConfig.Sequencer.Timeboost.Enable = true // Prevents race in sequencer where expressLaneService is read inside publishTransactionToQueue
+	builderSeq.L2.ExecutionConfigFetcher.Set(builderSeq.execConfig)
 
 	// Set up an autonomous auction contract service that runs in the background in this test.
 	redisURL := redisutil.CreateTestRedis(ctx, t)
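On the non-test side, gethexec's fetcher changing from `type ConfigFetcher func() *Config` to an interface with a `Get()` method means `CreateExecutionNode` now receives an object rather than a bare closure. A small illustrative sketch of how both a fixed snapshot and an existing closure can satisfy such an interface follows; the `Config`, `FetcherFunc`, and `staticFetcher` names are hypothetical stand-ins, not taken from the repository:

```go
package main

import "fmt"

// Config is a stand-in for gethexec.Config (hypothetical, trimmed to one field).
type Config struct {
	EnablePrefetchBlock bool
}

// ConfigFetcher mirrors the interface form the diff switches to:
// callers ask for an object with a Get() method instead of calling a func().
type ConfigFetcher interface {
	Get() *Config
}

// FetcherFunc adapts an ordinary closure to the interface, which is handy
// for callers that previously passed `func() *Config { ... }` directly.
type FetcherFunc func() *Config

func (f FetcherFunc) Get() *Config { return f() }

// staticFetcher always returns the same snapshot, similar in spirit to the
// static fetcher the system tests used before the generic helper existed.
type staticFetcher struct{ cfg *Config }

func (s *staticFetcher) Get() *Config { return s.cfg }

// useNode stands in for a constructor that reads its settings through the fetcher.
func useNode(cf ConfigFetcher) {
	fmt.Println("prefetch enabled:", cf.Get().EnablePrefetchBlock)
}

func main() {
	cfg := &Config{EnablePrefetchBlock: true}
	useNode(&staticFetcher{cfg: cfg})
	useNode(FetcherFunc(func() *Config { return cfg }))
}
```

An adapter in the spirit of `FetcherFunc` is one way call sites that used to pass a closure directly could migrate without declaring a new named type for every config source.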