Skip to content
Open
Show file tree
Hide file tree
Changes from 15 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
1fd01f5
eth/downloader, eth/protocols/snap: BAL req wiring
jrhea Mar 31, 2026
15acf66
eth/downloader, eth/protocols/snap: remove healing and genTrie, restr…
jrhea Mar 31, 2026
f44221c
eth/protocols/snap: Implement BAL fetching
jrhea Apr 1, 2026
646a753
eth/protocols/snap: create functions for bal verification and apply
jrhea Apr 2, 2026
b0b6e44
eth/downloader,eth/protocols/snap: implement catch-up on pivot
jrhea Apr 2, 2026
5b49128
eth/protocols/snap: add tests and fix peer registration for access lists
jrhea Apr 2, 2026
c6ef6f2
eth/protocols/snap: add pivot movement integration tests
jrhea Apr 2, 2026
1048dd8
eth/protocols/snap: implement trie rebuild and tests
jrhea Apr 2, 2026
87a4a38
eth/downloader, eth/protocols: clean up todos
jrhea Apr 3, 2026
d111e9f
eth/downloader: implement deep reorg detection and restart/wipe logic…
jrhea Apr 3, 2026
3180c2f
eth/protocols/snap: fix rlp.RawList and Bytes field after rebase and …
jrhea Apr 7, 2026
f68d736
core, core/state/snapshot: skip snapshot generation after sync comple…
jrhea Apr 7, 2026
247d7fc
eth/protocols/snap: fix test failure after rebase
jrhea Apr 7, 2026
9cb1b9e
eth/protocols/snap: deleted accessListsInput, decode directly into Ac…
jrhea Apr 7, 2026
c943ea3
fix lint errors caused by unused healing methods
jrhea Apr 7, 2026
c63f0ef
eth/protocols/snap: fix lost hashes on short access list responses an…
jrhea Apr 7, 2026
8bad150
eth/protocols/snap: skip new empty accounts in applyAccessList and test
jrhea Apr 7, 2026
6987d2e
eth/protocols/snap: hash raw storage slot keys in applyAccessList and…
jrhea Apr 7, 2026
e0a8f61
eth/protocols/snap: guard against from > to in catchup() and test
jrhea Apr 8, 2026
59918ab
eth/protocols/snap: added read lock in catchup and some reorg
jrhea Apr 8, 2026
589d284
eth/protocols/snap: fix issue where emptylist results for BALs are no…
jrhea Apr 9, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -1177,10 +1177,10 @@ func (bc *BlockChain) SnapSyncComplete(hash common.Hash) error {
if !bc.HasState(root) {
return fmt.Errorf("non existent state [%x..]", root[:4])
}
// Destroy any existing state snapshot and regenerate it in the background,
// also resuming the normal maintenance of any previously paused snapshot.
// Set up the snapshot tree from the synced flat state. Snap/2 downloads
// flat state directly as the snapshot.
if bc.snaps != nil {
bc.snaps.Rebuild(root)
bc.snaps.RebuildFromSyncedState(root)
}

// If all checks out, manually set the head block.
Expand Down
24 changes: 24 additions & 0 deletions core/state/snapshot/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"fmt"
"sync"

"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
Expand Down Expand Up @@ -726,6 +727,29 @@ func (t *Tree) Rebuild(root common.Hash) {
}
}

// RebuildFromSyncedState sets up the snapshot tree to use flat state that was
// already downloaded by snap sync. Unlike Rebuild, it does NOT regenerate the
// snapshot from the trie.
func (t *Tree) RebuildFromSyncedState(root common.Hash) {
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rjl493456442 I know the snapshot package is being deprecated, but I wasn't sure where to put this.

t.lock.Lock()
defer t.lock.Unlock()

rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
rawdb.DeleteSnapshotDisabled(t.diskdb)
rawdb.WriteSnapshotRoot(t.diskdb, root)
journalProgress(t.diskdb, nil, nil)

log.Info("Setting up snapshot from synced state", "root", root)
t.layers = map[common.Hash]snapshot{
root: &diskLayer{
diskdb: t.diskdb,
triedb: t.triedb,
cache: fastcache.New(t.config.CacheSize * 1024 * 1024),
root: root,
},
}
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
Expand Down
55 changes: 40 additions & 15 deletions eth/downloader/downloader.go
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
default:
log.Error("Unknown downloader mode", "mode", mode)
}
progress, pending := d.SnapSyncer.Progress()
progress := d.SnapSyncer.Progress()

return ethereum.SyncProgress{
StartingBlock: d.syncStatsChainOrigin,
Expand All @@ -288,12 +288,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
SyncedStorage: progress.StorageSynced,
SyncedStorageBytes: uint64(progress.StorageBytes),
HealedTrienodes: progress.TrienodeHealSynced,
HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
HealedBytecodes: progress.BytecodeHealSynced,
HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
HealingTrienodes: pending.TrienodeHeal,
HealingBytecode: pending.BytecodeHeal,
}
}

Expand Down Expand Up @@ -873,13 +867,43 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
return nil
}

// checkDeepReorg reports whether the old pivot block was reorged out of the
// canonical chain. It compares the state root recorded for the old pivot
// against the canonical header at the same height: a mismatch means the
// syncer's flat state came from a dead fork and must be wiped.
func checkDeepReorg(db ethdb.Database, oldNumber uint64, oldRoot common.Hash) bool {
	canonical := rawdb.ReadCanonicalHash(db, oldNumber)
	if canonical == (common.Hash{}) {
		// Nothing canonical at this height yet; no basis for a verdict.
		return false
	}
	header := rawdb.ReadHeader(db, canonical, oldNumber)
	// A missing header is treated like an unknown height; otherwise a root
	// mismatch signals a deep reorg.
	return header != nil && header.Root != oldRoot
}

// restartSnapSync tears down the running state sync and launches a fresh one
// targeting newRoot. If the old pivot turns out to have been reorged away,
// the recorded sync progress is dropped first so the new sync starts clean.
func (d *Downloader) restartSnapSync(oldSync *stateSync, newRoot common.Hash, newNumber uint64) *stateSync {
	reorged := checkDeepReorg(d.stateDB, oldSync.number, oldSync.root)
	if reorged {
		// The accumulated flat state belongs to a stale fork; wipe the
		// persisted progress marker before restarting.
		log.Warn("Deep reorg detected, restarting snap sync from scratch",
			"number", oldSync.number, "oldRoot", oldSync.root)
		rawdb.WriteSnapshotSyncStatus(d.stateDB, nil)
	}
	oldSync.Cancel()
	return d.syncState(newRoot, newNumber)
}

// processSnapSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processSnapSyncContent() error {
// Start syncing state of the reported head block. This should get us most of
// the state of the pivot block.
d.pivotLock.RLock()
sync := d.syncState(d.pivotHeader.Root)
sync := d.syncState(d.pivotHeader.Root, d.pivotHeader.Number.Uint64())
d.pivotLock.RUnlock()

defer func() {
Expand Down Expand Up @@ -950,9 +974,7 @@ func (d *Downloader) processSnapSyncContent() error {
if oldPivot == nil { // no results piling up, we can move the pivot
if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
if pivot.Root != sync.root { // pivot position changed, we can move the pivot
sync.Cancel()
sync = d.syncState(pivot.Root)

sync = d.restartSnapSync(sync, pivot.Root, pivot.Number.Uint64())
go closeOnErr(sync)
}
}
Expand All @@ -966,9 +988,7 @@ func (d *Downloader) processSnapSyncContent() error {
if P != nil {
// If new pivot block found, cancel old state retrieval and restart
if oldPivot != P {
sync.Cancel()
sync = d.syncState(P.Header.Root)

sync = d.restartSnapSync(sync, P.Header.Root, P.Header.Number.Uint64())
go closeOnErr(sync)
oldPivot = P
}
Expand Down Expand Up @@ -1086,7 +1106,12 @@ func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) erro
return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)

case *snap.TrieNodesPacket:
return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
// Snap/2 no longer requests trie nodes. Stale responses from
// snap/1 peers are silently ignored.
return nil

case *snap.AccessListsPacket:
return d.SnapSyncer.OnAccessLists(peer, packet.ID, packet.AccessLists)

default:
return fmt.Errorf("unexpected snap packet type: %T", packet)
Expand Down
84 changes: 71 additions & 13 deletions eth/downloader/downloader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -358,20 +358,15 @@ func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash,
return nil
}

// RequestTrieNodes fetches a batch of account or storage trie nodes.
func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, count int, paths []snap.TrieNodePathSet, bytes int) error {
encPaths, err := rlp.EncodeToRawList(paths)
if err != nil {
panic(err)
}
req := &snap.GetTrieNodesPacket{
ID: id,
Root: root,
Paths: encPaths,
Bytes: uint64(bytes),
// RequestAccessLists fetches a batch of BALs by block hash.
func (dlp *downloadTesterPeer) RequestAccessLists(id uint64, hashes []common.Hash, bytes int) error {
req := &snap.GetAccessListsPacket{
ID: id,
Hashes: hashes,
Bytes: uint64(bytes),
}
nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req)
go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes)
als := snap.ServiceGetAccessListsQuery(dlp.chain, req)
go dlp.dl.downloader.SnapSyncer.OnAccessLists(dlp, id, als)
return nil
}

Expand Down Expand Up @@ -704,3 +699,66 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
t.Fatalf("Failed to sync chain in three seconds")
}
}

// TestDeepReorgDetection verifies that checkDeepReorg correctly identifies
// when the canonical chain has reorged past the old pivot.
func TestDeepReorgDetection(t *testing.T) {
	var (
		db          = rawdb.NewMemoryDatabase()
		pivotHeight = uint64(100)
		syncedRoot  = common.HexToHash("0xaaaa")
		forkedRoot  = common.HexToHash("0xbbbb")
	)
	// Plant a canonical header at the pivot height whose state root differs
	// from the one the syncer targeted, simulating a reorg.
	reorged := &types.Header{
		Number:     new(big.Int).SetUint64(pivotHeight),
		Root:       forkedRoot,
		Difficulty: common.Big0,
	}
	rawdb.WriteHeader(db, reorged)
	rawdb.WriteCanonicalHash(db, reorged.Hash(), pivotHeight)

	// Mismatching roots at the pivot height must be flagged as a deep reorg.
	if !checkDeepReorg(db, pivotHeight, syncedRoot) {
		t.Fatal("expected deep reorg detection when roots differ")
	}
	// Matching roots mean the pivot is still canonical.
	if checkDeepReorg(db, pivotHeight, forkedRoot) {
		t.Fatal("should not detect reorg when roots match")
	}
	// A height with no canonical hash yields no reorg verdict.
	if checkDeepReorg(db, 999, syncedRoot) {
		t.Fatal("should not detect reorg when block number is unknown")
	}
}

// TestDeepReorgWipesProgress verifies that when a deep reorg is detected,
// snap sync progress is wiped so the syncer starts fresh.
func TestDeepReorgWipesProgress(t *testing.T) {
	var (
		db          = rawdb.NewMemoryDatabase()
		pivotHeight = uint64(100)
		syncedRoot  = common.HexToHash("0xaaaa")
		forkedRoot  = common.HexToHash("0xbbbb")
	)
	// Seed the database with pre-existing snap sync progress.
	rawdb.WriteSnapshotSyncStatus(db, []byte("some progress"))

	// Make the canonical header at the pivot height carry a different root.
	reorged := &types.Header{
		Number:     new(big.Int).SetUint64(pivotHeight),
		Root:       forkedRoot,
		Difficulty: common.Big0,
	}
	rawdb.WriteHeader(db, reorged)
	rawdb.WriteCanonicalHash(db, reorged.Hash(), pivotHeight)

	// Mirror restartSnapSync: a detected reorg wipes the stored progress.
	if checkDeepReorg(db, pivotHeight, syncedRoot) {
		rawdb.WriteSnapshotSyncStatus(db, nil)
	}
	if status := rawdb.ReadSnapshotSyncStatus(db); status != nil {
		t.Fatal("snap sync progress should be wiped after deep reorg")
	}
}
16 changes: 9 additions & 7 deletions eth/downloader/statesync.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,10 @@ import (
"github.com/ethereum/go-ethereum/log"
)

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
// syncState starts downloading state with the given root hash and block number.
func (d *Downloader) syncState(root common.Hash, number uint64) *stateSync {
// Create the state sync
s := newStateSync(d, root)
s := newStateSync(d, root, number)
select {
case d.stateSyncStart <- s:
// If we tell the statesync to restart with a new root, we also need
Expand Down Expand Up @@ -77,8 +77,9 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
d *Downloader // Downloader instance to access and manage current peerset
root common.Hash // State root currently being synced
d *Downloader // Downloader instance to access and manage current peerset
root common.Hash // State root currently being synced
number uint64 // Block number of the pivot

started chan struct{} // Started is signalled once the sync loop starts
cancel chan struct{} // Channel to signal a termination request
Expand All @@ -89,10 +90,11 @@ type stateSync struct {

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
func newStateSync(d *Downloader, root common.Hash, number uint64) *stateSync {
return &stateSync{
d: d,
root: root,
number: number,
cancel: make(chan struct{}),
done: make(chan struct{}),
started: make(chan struct{}),
Expand All @@ -104,7 +106,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
// finish.
func (s *stateSync) run() {
close(s.started)
s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
s.err = s.d.SnapSyncer.Sync(s.root, s.number, s.cancel)
close(s.done)
}

Expand Down
Loading
Loading