diff --git a/ouroboros-consensus-diffusion/changelog.d/20250512_165249_alexander.esgen_genesis_caughtup_final_chainsel.md b/ouroboros-consensus-diffusion/changelog.d/20250512_165249_alexander.esgen_genesis_caughtup_final_chainsel.md
new file mode 100644
index 0000000000..0a49bb7c84
--- /dev/null
+++ b/ouroboros-consensus-diffusion/changelog.d/20250512_165249_alexander.esgen_genesis_caughtup_final_chainsel.md
@@ -0,0 +1,3 @@
+
diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal
index 58d342b2af..6e0ab70def 100644
--- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal
+++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal
@@ -238,6 +238,7 @@ test-suite consensus-test
     Test.Consensus.Genesis.Tests.CSJ
     Test.Consensus.Genesis.Tests.DensityDisconnect
     Test.Consensus.Genesis.Tests.LoE
+    Test.Consensus.Genesis.Tests.LoE.CaughtUp
     Test.Consensus.Genesis.Tests.LoP
     Test.Consensus.Genesis.Tests.LongRangeAttack
     Test.Consensus.Genesis.Tests.Uniform
diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs
index 8fb3957b9a..7a0330ea08 100644
--- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs
+++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs
@@ -14,6 +14,7 @@ import Ouroboros.Network.Driver.Limits
   (ProtocolLimitFailure (ExceededTimeLimit))
 import Test.Consensus.BlockTree (BlockTree (..), BlockTreeBranch (..))
 import Test.Consensus.Genesis.Setup
+import qualified Test.Consensus.Genesis.Tests.LoE.CaughtUp as LoE.CaughtUp
 import Test.Consensus.PeerSimulator.Run
   (SchedulerConfig (..), defaultSchedulerConfig)
 import Test.Consensus.PeerSimulator.StateView
@@ -38,7 +39,8 @@ tests =
       adjustQuickCheckMaxSize (`div` 5) $
         testProperty "adversary does not hit timeouts" (prop_adversaryHitsTimeouts False),
       adjustQuickCheckMaxSize (`div` 5) $
-        testProperty "adversary hits timeouts" (prop_adversaryHitsTimeouts True)
+        testProperty "adversary hits timeouts" (prop_adversaryHitsTimeouts True),
+      LoE.CaughtUp.tests
     ]
diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE/CaughtUp.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE/CaughtUp.hs
new file mode 100644
index 0000000000..0f474693c5
--- /dev/null
+++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE/CaughtUp.hs
@@ -0,0 +1,294 @@
+{-# LANGUAGE BlockArguments #-}
+{-# LANGUAGE DataKinds #-}
+{-# LANGUAGE DerivingStrategies #-}
+{-# LANGUAGE NamedFieldPuns #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE TypeApplications #-}
+
+-- | This is a regression test for
+-- .
+--
+-- Concretely, consider @k = 1@ (security parameter), and a syncing node with
+-- Genesis enabled.
+--
+-- Now consider the following block tree:
+--
+-- > G :> A >: C
+-- >   :> B
+--
+-- Suppose that we have two peers, Peer 1 and Peer 2:
+--
+-- * Peer 1 first sends A, then C, then rolls back to A, and then idles.
+--
+-- * Peer 2 sends B and then idles.
+--
+-- In any possible interleaving (tested using IOSimPOR), the node should in the
+-- end be caught-up and have selected C as it is the best chain.
+--
+-- To (somewhat) simplify the test setup boilerplate, we do not actually run
+-- ChainSync and BlockFetch, but rather simulate their behavior by modifying the
+-- ChainSync client state (e.g. candidate fragments) as well as adding blocks to
+-- the ChainDB.
+module Test.Consensus.Genesis.Tests.LoE.CaughtUp (tests) where
+
+import Cardano.Ledger.BaseTypes (knownNonZeroBounded)
+import Control.Monad (join)
+import Control.Monad.Class.MonadTest (MonadTest (..))
+import qualified Control.Monad.Class.MonadTimer.SI as SI
+import Control.Monad.IOSim (exploreSimTrace, traceResult)
+import Control.ResourceRegistry
+import Control.Tracer (nullTracer)
+import Data.Function (on)
+import Data.Functor (void)
+import Ouroboros.Consensus.Block
+import Ouroboros.Consensus.Config
+import Ouroboros.Consensus.Genesis.Governor (gddWatcher)
+import Ouroboros.Consensus.HeaderValidation (HeaderWithTime)
+import Ouroboros.Consensus.MiniProtocol.ChainSync.Client
+  (ChainSyncClientHandle (..),
+   ChainSyncClientHandleCollection (..), ChainSyncState (..),
+   newChainSyncClientHandleCollection)
+import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State
+  (ChainSyncJumpingState (..), DisengagedInitState (..))
+import Ouroboros.Consensus.Node.Genesis (setGetLoEFragment)
+import qualified Ouroboros.Consensus.Node.GSM as GSM
+import Ouroboros.Consensus.Node.GsmState
+import Ouroboros.Consensus.NodeId
+import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB
+import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB)
+import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as Punishment
+import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDB.Impl
+import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB
+import Ouroboros.Consensus.Util.AnchoredFragment
+  (preferAnchoredCandidate)
+import Ouroboros.Consensus.Util.IOLike
+import Ouroboros.Consensus.Util.STM (forkLinkedWatcher)
+import Ouroboros.Network.AnchoredFragment (AnchoredFragment)
+import qualified Ouroboros.Network.AnchoredFragment as AF
+import Test.QuickCheck
+import Test.Tasty
+import Test.Tasty.QuickCheck
+import Test.Util.ChainDB
+import Test.Util.Header
+import Test.Util.Orphans.IOLike ()
+import Test.Util.TestBlock
+
+tests :: TestTree
+tests = testProperty "Select best chain when CaughtUp" prop_test
+
+prop_test :: Property
+prop_test =
+    exploreSimTrace id (exploreRaces *> run) \_ tr ->
+      case traceResult False tr of
+        Right prop -> prop
+        Left e     -> counterexample ("Failure: " <> show e) False
+
+run :: forall m. (IOLike m, SI.MonadTimer m) => m Property
+run = withRegistry \registry -> do
+    -- Setup
+    varGsmState <- newTVarIO PreSyncing
+    varLoEFragment <- newTVarIO $ AF.Empty AF.AnchorGenesis
+    varGetLoEFragment <- newTVarIO $ pure $
+      ChainDB.LoEEnabled $ AF.Empty AF.AnchorGenesis
+    setGetLoEFragment
+      (readTVar varGsmState)
+      (readTVar varLoEFragment)
+      varGetLoEFragment
+
+    chainDB <- openChainDB registry (join $ readTVarIO varGetLoEFragment)
+    let addBlk = ChainDB.addBlock_ chainDB Punishment.noPunishment
+
+    chainSyncHandles <- atomically newChainSyncClientHandleCollection
+
+    _ <- forkLinkedThread registry "GSM" $
+      GSM.enterPreSyncing $ mkGsmEntryPoints
+        chainSyncHandles
+        chainDB
+        (atomically . writeTVar varGsmState)
+
+    forkGDD
+      registry
+      chainSyncHandles
+      chainDB
+      (readTVar varGsmState)
+      varLoEFragment
+
+    -- Make sure that the ChainDB background thread, the GSM and the GDD are
+    -- running (any positive amount should do).
+    threadDelay 1
+
+    -- Simulate receiving A, B, C and C being rolled back. In the real system,
+    -- this would happen via ChainSync and BlockFetch.
+
+    _ <- forkLinkedThread registry "Peer1" $ do
+      -- First, let Peer1 connect, serving block A (without idling).
+      let initialFrag = attachSlotTimeToFragment cfg $
+            AF.Empty AF.AnchorGenesis AF.:> getHeader blkA
+      hdl <- atomically $ mkTestChainSyncClientHandle initialFrag
+      atomically $ cschcAddHandle chainSyncHandles peer1 hdl
+      addBlk blkA
+
+      -- Then, send C.
+      atomically $ modifyTVar (cschState hdl) $ \s -> ChainSyncState {
+          csCandidate = csCandidate s AF.:> attachSlotTime cfg (getHeader blkC)
+        , csLatestSlot = pure $ NotOrigin $ blockSlot blkC
+        , csIdling = csIdling s
+        }
+      addBlk blkC
+
+      -- Finally, roll back to the initial fragment and idle.
+      atomically $ modifyTVar (cschState hdl) $ \_s -> ChainSyncState {
+          csCandidate = initialFrag
+        , csLatestSlot = pure $ AF.headSlot initialFrag
+        , csIdling = True
+        }
+
+    _ <- forkLinkedThread registry "Peer2" $ do
+      -- Let Peer2 connect and send B.
+      hdl <- atomically
+         $ mkTestChainSyncClientHandle
+         $ attachSlotTimeToFragment cfg
+         $ AF.Empty AF.AnchorGenesis AF.:> getHeader blkB
+      atomically $ cschcAddHandle chainSyncHandles peer2 hdl
+      addBlk blkB
+
+      -- Finally, idle.
+      atomically $ modifyTVar (cschState hdl) $ \s -> ChainSyncState {
+          csCandidate = csCandidate s
+        , csLatestSlot = csLatestSlot s
+        , csIdling = True
+        }
+
+    -- Give time to process the new blocks (any positive amount should do).
+    threadDelay 1
+
+    gsmState <- atomically $ readTVar varGsmState
+    tipPt <- atomically $ AF.headPoint <$> ChainDB.getCurrentChain chainDB
+    pure $ conjoin
+      [ gsmState === CaughtUp
+      , counterexample ("Selection tip is not C") $
+          castPoint tipPt === blockPoint blkC
+      ]
+  where
+    peer1, peer2 :: CoreNodeId
+    peer1 = CoreNodeId 1
+    peer2 = CoreNodeId 2
+
+    blkA, blkB, blkC :: TestBlock
+    blkA = firstBlock 1
+    blkB = firstBlock 2
+    blkC = successorBlock blkA
+
+{-------------------------------------------------------------------------------
+  Boilerplate for setting up the various test components
+-------------------------------------------------------------------------------}
+
+cfg :: TopLevelConfig TestBlock
+cfg =
+  singleNodeTestConfigWith
+    TestBlockCodecConfig
+    TestBlockStorageConfig
+    -- To make the test as simple as possible (otherwise, "saturating" the LoE
+    -- requires more blocks).
+    (SecurityParam $ knownNonZeroBounded @1)
+    -- large Genesis window to avoid disconnecting any peers
+    (GenesisWindow 20)
+
+mkTestChainSyncClientHandle ::
+     forall m. IOLike m
+  => AnchoredFragment (HeaderWithTime TestBlock)
+  -> STM m (ChainSyncClientHandle m TestBlock)
+mkTestChainSyncClientHandle frag = do
+    varState <- newTVar ChainSyncState {
+        csCandidate = frag
+      , csIdling = False
+      , csLatestSlot = pure $ AF.headSlot frag
+      }
+    varJumping <- newTVar $ Disengaged DisengagedDone
+    varJumpInfo <- newTVar Nothing
+    pure ChainSyncClientHandle {
+        cschState = varState
+        -- Irrelevant for this test (as we don't actually run ChainSync).
+      , cschOnGsmStateChanged = \_gsmState _curTime -> pure ()
+      , cschGDDKill = pure ()
+      , cschJumping = varJumping
+      , cschJumpInfo = varJumpInfo
+      }
+
+openChainDB ::
+     forall m. IOLike m
+  => ResourceRegistry m
+  -> ChainDB.GetLoEFragment m TestBlock
+  -> m (ChainDB m TestBlock)
+openChainDB registry getLoEFragment = do
+    chainDbArgs <- do
+      mcdbNodeDBs <- emptyNodeDBs
+      let mcdbTopLevelConfig = cfg
+          configureLoE a = a { ChainDB.cdbsArgs =
+              (ChainDB.cdbsArgs a) { ChainDB.cdbsLoE = getLoEFragment }
+            }
+      pure $ configureLoE $ fromMinimalChainDbArgs MinimalChainDbArgs{
+          mcdbChunkInfo = mkTestChunkInfo mcdbTopLevelConfig
+        , mcdbInitLedger = testInitExtLedger
+        , mcdbRegistry = registry
+        , mcdbTopLevelConfig
+        , mcdbNodeDBs
+        }
+    (_, (chainDB, ChainDB.Impl.Internal{ChainDB.Impl.intAddBlockRunner})) <-
+      allocate
+        registry
+        (\_ -> ChainDB.Impl.openDBInternal chainDbArgs False)
+        (ChainDB.closeDB . fst)
+    _ <- forkLinkedThread registry "AddBlockRunner" intAddBlockRunner
+    pure chainDB
+
+mkGsmEntryPoints ::
+     forall m. (IOLike m, SI.MonadTimer m)
+  => ChainSyncClientHandleCollection CoreNodeId m TestBlock
+  -> ChainDB m TestBlock
+  -> (GsmState -> m ())
+  -> GSM.GsmEntryPoints m
+mkGsmEntryPoints varChainSyncHandles chainDB writeGsmState =
+    GSM.realGsmEntryPoints (id, nullTracer) GSM.GsmView {
+        GSM.candidateOverSelection
+      , GSM.peerIsIdle = csIdling
+      , GSM.equivalent = (==) `on` AF.headPoint
+      , GSM.getChainSyncStates = fmap cschState <$> cschcMap varChainSyncHandles
+      , GSM.getCurrentSelection = ChainDB.getCurrentChain chainDB
+        -- Make sure that we stay in CaughtUp for the duration of the test once we
+        -- have entered it.
+      , GSM.minCaughtUpDuration = 10 -- seconds
+      , GSM.writeGsmState
+        -- Not interesting for this test.
+      , GSM.antiThunderingHerd = Nothing
+      , GSM.setCaughtUpPersistentMark = \_ -> pure ()
+      , GSM.durationUntilTooOld = Nothing
+      , GSM.isHaaSatisfied = pure True
+      }
+  where
+    candidateOverSelection selection candidateState =
+      case AF.intersectionPoint selection candFrag of
+        Nothing -> GSM.CandidateDoesNotIntersect
+        Just{}  -> -- precondition requires intersection
+          GSM.WhetherCandidateIsBetter
+            $ preferAnchoredCandidate (configBlock cfg) selection candFrag
+      where
+        candFrag = csCandidate candidateState
+
+forkGDD ::
+     forall m. IOLike m
+  => ResourceRegistry m
+  -> ChainSyncClientHandleCollection CoreNodeId m TestBlock
+  -> ChainDB m TestBlock
+  -> STM m GsmState
+  -> StrictTVar m (AnchoredFragment (HeaderWithTime TestBlock))
+  -> m ()
+forkGDD registry varChainSyncHandles chainDB getGsmState varLoEFrag =
+    void $ forkLinkedWatcher registry "GDD" $ gddWatcher
+      cfg
+      nullTracer
+      chainDB
+      (0 :: DiffTime) -- no rate limiting
+      getGsmState
+      (cschcMap varChainSyncHandles)
+      varLoEFrag
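The new test module plugs into the existing consensus-test suite, but for local iteration a minimal standalone driver can be handy. A hypothetical sketch (not part of the patch; it assumes only the exported `tests :: TestTree` and stock tasty):

```haskell
module Main (main) where

import Test.Tasty (defaultMain)

import qualified Test.Consensus.Genesis.Tests.LoE.CaughtUp as CaughtUp

-- Run only the new IOSimPOR-based regression test. QuickCheck drives
-- 'exploreSimTrace', which in turn explores schedules permuting the
-- Peer1/Peer2/GSM/GDD thread interleavings.
main :: IO ()
main = defaultMain CaughtUp.tests
```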
diff --git a/ouroboros-consensus/changelog.d/20250512_164809_alexander.esgen_genesis_caughtup_final_chainsel.md b/ouroboros-consensus/changelog.d/20250512_164809_alexander.esgen_genesis_caughtup_final_chainsel.md
new file mode 100644
index 0000000000..fcc32d2ff0
--- /dev/null
+++ b/ouroboros-consensus/changelog.d/20250512_164809_alexander.esgen_genesis_caughtup_final_chainsel.md
@@ -0,0 +1,3 @@
+### Patch
+
+- Changed ChainSel to reprocess LoE-delayed blocks even when LoE is disabled.
diff --git a/ouroboros-consensus/changelog.d/20250512_165110_alexander.esgen_genesis_caughtup_final_chainsel.md b/ouroboros-consensus/changelog.d/20250512_165110_alexander.esgen_genesis_caughtup_final_chainsel.md
new file mode 100644
index 0000000000..f5add4eaf7
--- /dev/null
+++ b/ouroboros-consensus/changelog.d/20250512_165110_alexander.esgen_genesis_caughtup_final_chainsel.md
@@ -0,0 +1,5 @@
+### Patch
+
+- Changed GDD to trigger chain selection when caught-up. In certain edge cases,
+  this enables the node to promptly select a better chain right after concluding
+  that it is caught-up.
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs
index 6a388c6010..822a398e7f 100644
--- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs
+++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs
@@ -1,6 +1,7 @@
 {-# LANGUAGE DerivingStrategies #-}
 {-# LANGUAGE FlexibleContexts #-}
 {-# LANGUAGE GADTs #-}
+{-# LANGUAGE LambdaCase #-}
 {-# LANGUAGE NamedFieldPuns #-}
 {-# LANGUAGE RankNTypes #-}
 {-# LANGUAGE ScopedTypeVariables #-}
@@ -103,16 +104,22 @@ gddWatcher ::
      -- ^ The LoE fragment. It starts at a (recent) immutable tip and ends at
      -- the common intersection of the candidate fragments.
   -> Watcher m
-       (GsmState, GDDStateView m blk peer)
-       (Map peer (StrictMaybe (WithOrigin SlotNo), Bool))
+       (GDDTrigger (GDDStateView m blk peer))
+       (GDDTrigger (Map peer (StrictMaybe (WithOrigin SlotNo), Bool)))
 gddWatcher cfg tracer chainDb rateLimit getGsmState getHandles varLoEFrag =
     Watcher {
         wInitial = Nothing
-      , wReader = (,) <$> getGsmState <*> getGDDStateView
+      , wReader
       , wFingerprint
       , wNotify
       }
   where
+    wReader :: STM m (GDDTrigger (GDDStateView m blk peer))
+    wReader = getGsmState >>= \case
+      PreSyncing -> pure GDDPreSyncing
+      Syncing    -> GDDSyncing <$> getGDDStateView
+      CaughtUp   -> pure GDDCaughtUp
+
     getGDDStateView :: STM m (GDDStateView m blk peer)
     getGDDStateView = do
       curChain <- ChainDB.getCurrentChainWithTime chainDb
@@ -127,14 +134,11 @@ gddWatcher cfg tracer chainDb rateLimit getGsmState getHandles varLoEFrag =
         }
 
     wFingerprint ::
-         (GsmState, GDDStateView m blk peer)
-      -> Map peer (StrictMaybe (WithOrigin SlotNo), Bool)
-    wFingerprint (gsmState, GDDStateView{gddCtxStates}) = case gsmState of
-      -- When we are in 'PreSyncing' (HAA not satisfied) or are caught up, we
-      -- don't have to run the GDD on changes to the candidate fragments.
-      -- (Maybe we want to do it in 'PreSycing'?)
-      PreSyncing -> Map.empty
-      CaughtUp   -> Map.empty
+         GDDTrigger (GDDStateView m blk peer)
+      -> GDDTrigger (Map peer (StrictMaybe (WithOrigin SlotNo), Bool))
+    wFingerprint = \case
+      GDDPreSyncing -> GDDPreSyncing
+      GDDCaughtUp   -> GDDCaughtUp
       -- When syncing, wake up regularly while headers are sent.
       -- Watching csLatestSlot ensures that GDD is woken up when a peer is
      -- sending headers even if they are after the forecast horizon. Note
@@ -142,22 +146,37 @@ gddWatcher cfg tracer chainDb rateLimit getGsmState getHandles varLoEFrag =
       -- it becoming visible to GDD. It will be visible only when csLatestSlot
       -- changes again or when csIdling changes, which is guaranteed to happen
       -- eventually.
-      Syncing ->
+      GDDSyncing GDDStateView{gddCtxStates} -> GDDSyncing $
         Map.map (\css -> (csLatestSlot css, csIdling css)) gddCtxStates
 
-    wNotify :: (GsmState, GDDStateView m blk peer) -> m ()
-    wNotify (_gsmState, stateView) = do
-      t0 <- getMonotonicTime
-      loeFrag <- evaluateGDD cfg tracer stateView
-      oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag
-      -- The chain selection only depends on the LoE tip, so there
-      -- is no point in retriggering it if the LoE tip hasn't changed.
-      when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $
+    wNotify :: GDDTrigger (GDDStateView m blk peer) -> m ()
+    wNotify = \case
+      -- No need to run the GDD in PreSyncing as the LoE advancing doesn't
+      -- allow us to select more blocks here by design.
+      GDDPreSyncing -> pure ()
+      -- Make sure that any LoE-postponed blocks have a chance to be selected.
+      GDDCaughtUp ->
         void $ ChainDB.triggerChainSelectionAsync chainDb
-      tf <- getMonotonicTime
-      -- We limit the rate at which GDD is evaluated, otherwise it would
-      -- be called every time a new header is validated.
-      threadDelay $ rateLimit - diffTime tf t0
+      -- Run the GDD on the candidate fragments.
+      GDDSyncing stateView -> do
+        t0 <- getMonotonicTime
+        loeFrag <- evaluateGDD cfg tracer stateView
+        oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag
+        -- The chain selection only depends on the LoE tip, so there
+        -- is no point in retriggering it if the LoE tip hasn't changed.
+        when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $
+          void $ ChainDB.triggerChainSelectionAsync chainDb
+        tf <- getMonotonicTime
+        -- We limit the rate at which GDD is evaluated, otherwise it would
+        -- be called every time a new header is validated.
+        threadDelay $ rateLimit - diffTime tf t0
+
+-- | Data indicating what triggered the GDD.
+data GDDTrigger a =
+    GDDPreSyncing
+  | GDDSyncing a
+  | GDDCaughtUp
+  deriving stock (Show, Eq)
 
 -- | Pure snapshot of the dynamic data the GDD operates on.
 data GDDStateView m blk peer = GDDStateView {
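The reworked `gddWatcher` leans on the semantics of the `Watcher` abstraction: `wNotify` only runs when the fingerprint of the value read by `wReader` changes. A minimal sketch of those semantics (simplified and illustrative; the real `Watcher` lives in `Ouroboros.Consensus.Util.STM` and differs in details such as how the initial fingerprint is handled):

```haskell
import Control.Concurrent.STM (STM, atomically, check)

-- Simplified model of the watcher loop assumed by 'gddWatcher': block
-- until the fingerprint of the value read changes, then run the
-- notification action on the new value.
watcherLoop :: Eq fp => STM a -> (a -> fp) -> (a -> IO ()) -> IO ()
watcherLoop reader fingerprint notify = go Nothing
  where
    go prevFp = do
      (a, fp) <- atomically $ do
        a <- reader
        let fp = fingerprint a
        check (Just fp /= prevFp) -- retry until the fingerprint changes
        pure (a, fp)
      notify a
      go (Just fp)
```

Because `wFingerprint` collapses `GDDPreSyncing` and `GDDCaughtUp` to constant fingerprints, `wNotify` fires exactly once on the transition into each of those states; the single firing on entering `CaughtUp` is what schedules the final chain selection for LoE-postponed blocks.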
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs
index 43f96e5baa..07a4d02992 100644
--- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs
+++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs
@@ -332,29 +332,29 @@ chainSelSync ::
 -- We run a background thread that polls the candidate fragments and sends
 -- 'ChainSelReprocessLoEBlocks' whenever we receive a new header or lose a
 -- peer.
--- If 'cdbLoE' is 'LoEDisabled', this task is skipped.
+--
+-- Note that we do this even when we are caught-up, as we might want to select
+-- blocks that were originally postponed by the LoE, but can be adopted once we
+-- conclude that we are caught-up (and hence are no longer bound by the LoE).
 chainSelSync cdb@CDB{..} (ChainSelReprocessLoEBlocks varProcessed) = do
-    lift cdbLoE >>= \case
-      LoEDisabled -> pure ()
-      LoEEnabled _ -> do
-        (succsOf, chain) <- lift $ atomically $ do
-          invalid <- forgetFingerprint <$> readTVar cdbInvalid
-          (,)
-            <$> (ignoreInvalidSuc cdbVolatileDB invalid <$>
-                  VolatileDB.filterByPredecessor cdbVolatileDB)
-            <*> Query.getCurrentChain cdb
-        let
-          succsOf' = Set.toList . succsOf . pointHash . castPoint
-          loeHashes = succsOf' (AF.anchorPoint chain)
-          firstHeader = either (const Nothing) Just $ AF.last chain
-          -- We avoid the VolatileDB for the headers we already have in the chain
-          getHeaderFromHash hash =
-            case firstHeader of
-              Just header | headerHash header == hash -> pure header
-              _ -> VolatileDB.getKnownBlockComponent cdbVolatileDB GetHeader hash
-        loeHeaders <- lift (mapM getHeaderFromHash loeHashes)
-        for_ loeHeaders $ \hdr ->
-          chainSelectionForBlock cdb BlockCache.empty hdr noPunishment
+    (succsOf, chain) <- lift $ atomically $ do
+      invalid <- forgetFingerprint <$> readTVar cdbInvalid
+      (,)
+        <$> (ignoreInvalidSuc cdbVolatileDB invalid <$>
+              VolatileDB.filterByPredecessor cdbVolatileDB)
+        <*> Query.getCurrentChain cdb
+    let
+      succsOf' = Set.toList . succsOf . pointHash . castPoint
+      loeHashes = succsOf' (AF.anchorPoint chain)
+      firstHeader = either (const Nothing) Just $ AF.last chain
+      -- We avoid the VolatileDB for the headers we already have in the chain
+      getHeaderFromHash hash =
+        case firstHeader of
+          Just header | headerHash header == hash -> pure header
+          _ -> VolatileDB.getKnownBlockComponent cdbVolatileDB GetHeader hash
+    loeHeaders <- lift (mapM getHeaderFromHash loeHashes)
+    for_ loeHeaders $ \hdr ->
+      chainSelectionForBlock cdb BlockCache.empty hdr noPunishment
     lift $ atomically $ putTMVar varProcessed ()
 
 chainSelSync cdb@CDB {..} (ChainSelAddBlock BlockToAdd { blockToAdd = b, .. }) = do
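The removed `LoEDisabled` short-circuit is what made the bug possible: with Genesis, the ChainDB's LoE getter is derived from the GSM state (the new test wires this up via `setGetLoEFragment`), so the node starts reporting `LoEDisabled` at the very moment it enters `CaughtUp`. A self-contained sketch of that dependency (inlined stand-in types; the getter's behavior here is an assumption based on the test setup, not a copy of `Ouroboros.Consensus.Node.Genesis`):

```haskell
-- Inlined stand-ins for the real 'GsmState' and 'ChainDB.LoE' types.
data GsmState = PreSyncing | Syncing | CaughtUp
data LoE a = LoEDisabled | LoEEnabled a

-- Assumed behavior of the getter installed by 'setGetLoEFragment':
-- the LoE only applies while the node is not caught-up.
loeForGsmState :: GsmState -> frag -> LoE frag
loeForGsmState gsmState loeFrag = case gsmState of
  PreSyncing -> LoEEnabled loeFrag
  Syncing    -> LoEEnabled loeFrag
  CaughtUp   -> LoEDisabled
```

Hence the `ChainSelReprocessLoEBlocks` request that the GDD now sends upon entering `CaughtUp` used to be dropped, and a block like C that the LoE had postponed (with k = 1 and the LoE anchored at G, the selection may not extend more than one block past G) was never offered to chain selection again.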