@@ -401,85 +401,106 @@ func NewNodeWithContext(ctx context.Context,
 		return nil, err
 	}

-	mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, tracer)
+	var mempool mempl.Mempool
+	var mempoolReactor p2p.Reactor
+	var evidenceReactor *evidence.Reactor
+	var evidencePool *evidence.Pool

-	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger)
-	if err != nil {
-		return nil, err
+	// Skip non-PEX reactors in seed mode
+	if !config.P2P.SeedMode {
+		mempool, mempoolReactor = createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, tracer)
+
+		evidenceReactor, evidencePool, err = createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger)
+		if err != nil {
+			return nil, err
+		}
 	}

-	// make block executor for consensus and blocksync reactors to execute blocks
-	blockExec := sm.NewBlockExecutor(
-		stateStore,
-		logger.With("module", "state"),
-		proxyApp.Consensus(),
-		mempool,
-		evidencePool,
-		blockStore,
-		sm.BlockExecutorWithMetrics(smMetrics),
-		sm.BlockExecutorWithRootDir(config.RootDir),
-		sm.BlockExecutorWithTracer(tracer),
-	)
+	var blockExec *sm.BlockExecutor
+	var bcReactor p2p.Reactor
+	var propagationReactor *propagation.Reactor
+	var consensusReactor *cs.Reactor
+	var consensusState *cs.State
+	var stateSyncReactor *statesync.Reactor
+
+	// Skip non-PEX reactors in seed mode
+	if !config.P2P.SeedMode {
+		// make block executor for consensus and blocksync reactors to execute blocks
+		blockExec = sm.NewBlockExecutor(
+			stateStore,
+			logger.With("module", "state"),
+			proxyApp.Consensus(),
+			mempool,
+			evidencePool,
+			blockStore,
+			sm.BlockExecutorWithMetrics(smMetrics),
+			sm.BlockExecutorWithRootDir(config.RootDir),
+			sm.BlockExecutorWithTracer(tracer),
+		)

-	offlineStateSyncHeight := int64(0)
-	if blockStore.Height() == 0 {
-		offlineStateSyncHeight, err = blockExec.Store().GetOfflineStateSyncHeight()
-		if err != nil && err.Error() != "value empty" {
-			panic(fmt.Sprintf("failed to retrieve statesynced height from store %s; expected state store height to be %v", err, state.LastBlockHeight))
+		offlineStateSyncHeight := int64(0)
+		if blockStore.Height() == 0 {
+			offlineStateSyncHeight, err = blockExec.Store().GetOfflineStateSyncHeight()
+			if err != nil && err.Error() != "value empty" {
+				panic(fmt.Sprintf("failed to retrieve statesynced height from store %s; expected state store height to be %v", err, state.LastBlockHeight))
+			}
+		}
+		// Don't start block sync if we're doing a state sync first.
+		bcReactor, err = createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, localAddr, logger, bsMetrics, offlineStateSyncHeight)
+		if err != nil {
+			return nil, fmt.Errorf("could not create blocksync reactor: %w", err)
 		}
-	}
-	// Don't start block sync if we're doing a state sync first.
-	bcReactor, err := createBlocksyncReactor(config, state, blockExec, blockStore, blockSync && !stateSync, localAddr, logger, bsMetrics, offlineStateSyncHeight)
-	if err != nil {
-		return nil, fmt.Errorf("could not create blocksync reactor: %w", err)
-	}
-
-	if state.TimeoutCommit > 0 {
-		// set the catchup retry time to match the block time
-		propagation.RetryTime = state.TimeoutCommit
-	}
-	partsChan := make(chan types.PartInfo, 2500)
-	proposalChan := make(chan types.Proposal, 100)
-	propagationReactor := propagation.NewReactor(
-		nodeKey.ID(),
-		propagation.Config{
-			Store:         blockStore,
-			Mempool:       mempool,
-			Privval:       privValidator,
-			ChainID:       state.ChainID,
-			BlockMaxBytes: state.ConsensusParams.Block.MaxBytes,
-			PartChan:      partsChan,
-			ProposalChan:  proposalChan,
-		},
-		propagation.WithTracer(tracer),
-	)
-	if !stateSync && !blockSync {
-		propagationReactor.StartProcessing()
-	}

-	consensusReactor, consensusState := createConsensusReactor(
-		config, state, blockExec, blockStore, mempool, evidencePool,
-		privValidator, csMetrics, propagationReactor, stateSync || blockSync, eventBus, consensusLogger, offlineStateSyncHeight, tracer, partsChan, proposalChan,
-	)
+		if state.TimeoutCommit > 0 {
+			// set the catchup retry time to match the block time
+			propagation.RetryTime = state.TimeoutCommit
+		}
+		partsChan := make(chan types.PartInfo, 2500)
+		proposalChan := make(chan types.Proposal, 100)
+		propagationReactor = propagation.NewReactor(
+			nodeKey.ID(),
+			propagation.Config{
+				Store:         blockStore,
+				Mempool:       mempool,
+				Privval:       privValidator,
+				ChainID:       state.ChainID,
+				BlockMaxBytes: state.ConsensusParams.Block.MaxBytes,
+				PartChan:      partsChan,
+				ProposalChan:  proposalChan,
+			},
+			propagation.WithTracer(tracer),
+		)
+		if !stateSync && !blockSync {
+			propagationReactor.StartProcessing()
+		}

-	err = stateStore.SetOfflineStateSyncHeight(0)
-	if err != nil {
-		panic(fmt.Sprintf("failed to reset the offline state sync height %s", err))
-	}
-	propagationReactor.SetLogger(logger.With("module", "propagation"))
-
-	logger.Info("Consensus reactor created", "timeout_propose", consensusState.GetState().TimeoutPropose, "timeout_commit", consensusState.GetState().TimeoutCommit)
-	// Set up state sync reactor, and schedule a sync if requested.
-	// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
-	// we should clean this whole thing up. See:
-	// https://github.com/tendermint/tendermint/issues/4644
-	stateSyncReactor := statesync.NewReactor(
-		*config.StateSync,
-		proxyApp.Snapshot(),
-		proxyApp.Query(),
-		ssMetrics,
-	)
-	stateSyncReactor.SetLogger(logger.With("module", "statesync"))
+		consensusReactor, consensusState = createConsensusReactor(
+			config, state, blockExec, blockStore, mempool, evidencePool,
+			privValidator, csMetrics, propagationReactor, stateSync || blockSync, eventBus, consensusLogger, offlineStateSyncHeight, tracer, partsChan, proposalChan,
+		)
+	}
+
+	// Skip non-PEX reactors in seed mode
+	if !config.P2P.SeedMode {
+		err = stateStore.SetOfflineStateSyncHeight(0)
+		if err != nil {
+			panic(fmt.Sprintf("failed to reset the offline state sync height %s", err))
+		}
+		propagationReactor.SetLogger(logger.With("module", "propagation"))
+
+		logger.Info("Consensus reactor created", "timeout_propose", consensusState.GetState().TimeoutPropose, "timeout_commit", consensusState.GetState().TimeoutCommit)
+		// Set up state sync reactor, and schedule a sync if requested.
+		// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
+		// we should clean this whole thing up. See:
+		// https://github.com/tendermint/tendermint/issues/4644
+		stateSyncReactor = statesync.NewReactor(
+			*config.StateSync,
+			proxyApp.Snapshot(),
+			proxyApp.Query(),
+			ssMetrics,
+		)
+		stateSyncReactor.SetLogger(logger.With("module", "statesync"))
+	}

 	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state, softwareVersion)
 	if err != nil {
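
The condition gating the new branches is config.P2P.SeedMode, so the reduced wiring only applies when the operator runs the node as a seed. As a rough usage sketch (not part of this commit), assuming the fork keeps the upstream Tendermint config package with its DefaultConfig helper and P2P.SeedMode field:

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config" // assumed import path for this fork
)

func main() {
	// Enabling seed mode makes NewNodeWithContext take the guarded path in the
	// diff above: only the PEX-related setup runs, while the mempool, evidence,
	// blocksync, consensus, propagation, and state sync reactors are left nil.
	c := cfg.DefaultConfig()
	c.P2P.SeedMode = true
	fmt.Println("seed mode:", c.P2P.SeedMode)
}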