diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index e457b3d48..f17d6520f 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -53,6 +53,7 @@ var ( setClaimEventSignature = crypto.Keccak256Hash([]byte( "SetClaim(bytes32)", )) + backwardLETEventSignature = crypto.Keccak256Hash([]byte("BackwardLET(uint256,bytes32,uint256,bytes32)")) claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") @@ -115,6 +116,10 @@ func buildAppender( appender[removeLegacySovereignTokenEventSignature] = buildRemoveLegacyTokenHandler(bridgeDeployment.agglayerBridgeL2) appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[setClaimEventSignature] = buildSetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) + appender[backwardLETEventSignature] = buildBackwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) + + default: + return nil, fmt.Errorf("unsupported bridge deployment kind: %d", bridgeDeployment.kind) } return appender, nil @@ -639,6 +644,26 @@ func buildSetClaimEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func } } +// buildBackwardLETEventHandler creates a handler for the BackwardLET event log +func buildBackwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseBackwardLET(l) + if err != nil { + return fmt.Errorf("error parsing BackwardLET event log %+v: %w", l, err) + } + + b.Events = append(b.Events, Event{BackwardLET: &BackwardLET{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + PreviousDepositCount: event.PreviousDepositCount, + PreviousRoot: event.PreviousRoot, + NewDepositCount: event.NewDepositCount, + NewRoot: event.NewRoot, + }}) + return nil + } +} + type Call struct { From common.Address `json:"from"` To common.Address `json:"to"` diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index d2d2ac5c2..7821a5ef2 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -282,6 +282,7 @@ func TestBuildAppender(t *testing.T) { eventSignature common.Hash deploymentKind BridgeDeployment logBuilder func() (types.Log, error) + expectedErr string }{ { name: "bridgeEventSignature appender", @@ -573,6 +574,38 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, + { + name: "backwardLETSignature appender", + eventSignature: backwardLETEventSignature, + deploymentKind: SovereignChain, + logBuilder: func() (types.Log, error) { + event, err := bridgeL2Abi.EventByID(backwardLETEventSignature) + if err != nil { + return types.Log{}, err + } + + previousDepositCount := big.NewInt(10) + previousRoot := common.HexToHash("0xdeadbeef") + newDepositCount := big.NewInt(8) + newRoot := common.HexToHash("0x5ca1e") + data, err := event.Inputs.Pack(previousDepositCount, previousRoot, newDepositCount, newRoot) + if err != nil { + return types.Log{}, err + } + + l := types.Log{ + Topics: []common.Hash{backwardLETEventSignature}, + Data: data, + } + return l, nil + }, + }, + { + name: "unknown deployment kind", + deploymentKind: 100, + logBuilder: func() (types.Log, error) { return types.Log{}, nil }, + expectedErr: "unsupported bridge deployment kind: 100", + }, } for _, tt := range tests { @@ -583,17 +616,21 @@ func TestBuildAppender(t *testing.T) { logger := logger.WithFields("module", "test") bridgeDeployment.kind = tt.deploymentKind appenderMap, err := buildAppender(t.Context(), 
ethClient, bridgeAddr, false, bridgeDeployment, logger) - require.NoError(t, err) - require.NotNil(t, appenderMap) + if tt.expectedErr == "" { + require.NoError(t, err) + require.NotNil(t, appenderMap) - block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} + block := &sync.EVMBlock{EVMBlockHeader: sync.EVMBlockHeader{Num: blockNum}} - appenderFunc, exists := appenderMap[tt.eventSignature] - require.True(t, exists) + appenderFunc, exists := appenderMap[tt.eventSignature] + require.True(t, exists) - err = appenderFunc(block, log) - require.NoError(t, err) - require.Len(t, block.Events, 1) + err = appenderFunc(block, log) + require.NoError(t, err) + require.Len(t, block.Events, 1) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } }) } } diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index aab4673de..983d24982 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -1,6 +1,41 @@ -- +migrate Down +DROP TABLE IF EXISTS bridge_archive; +DROP TABLE IF EXISTS backward_let; +ALTER TABLE bridge DROP COLUMN source; ALTER TABLE bridge DROP COLUMN to_address; -- +migrate Up +CREATE TABLE IF NOT EXISTS backward_let ( + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + previous_deposit_count TEXT NOT NULL, + previous_root VARCHAR NOT NULL, + new_deposit_count TEXT NOT NULL, + new_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) + ); + +ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; ALTER TABLE bridge ADD COLUMN to_address VARCHAR; +------------------------------------------------------------------------------ +-- Create bridge_archive table +------------------------------------------------------------------------------ +CREATE TABLE IF NOT EXISTS bridge_archive ( + deposit_count INTEGER PRIMARY KEY, + block_num INTEGER NOT NULL, + block_pos INTEGER NOT NULL, + leaf_type INTEGER NOT NULL, + origin_network INTEGER NOT NULL, + origin_address VARCHAR NOT NULL, + destination_network INTEGER NOT NULL, + destination_address VARCHAR NOT NULL, + amount TEXT NOT NULL, + metadata BLOB, + tx_hash VARCHAR, + block_timestamp INTEGER, + txn_sender VARCHAR, + from_address VARCHAR, + source TEXT DEFAULT '', + to_address VARCHAR + ); diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go index cedf6c412..a806e684a 100644 --- a/bridgesync/migrations/migrations.go +++ b/bridgesync/migrations/migrations.go @@ -1,100 +1,70 @@ package migrations import ( - _ "embed" + "embed" + "fmt" + "sort" + "strings" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/db/types" - treeMigrations "github.com/agglayer/aggkit/tree/migrations" + treemigrations "github.com/agglayer/aggkit/tree/migrations" ) -//go:embed bridgesync0001.sql -var mig0001 string - -//go:embed bridgesync0002.sql -var mig0002 string +var ( + //go:embed *.sql + migrationFS embed.FS + migrations []types.Migration +) -//go:embed bridgesync0003.sql -var mig0003 string +func init() { + entries, err := migrationFS.ReadDir(".") + if err != nil { + panic(fmt.Errorf("failed to read embedded migrations: %w", err)) + } -//go:embed bridgesync0004.sql -var mig0004 string + for _, e := range entries { + name := e.Name() // e.g. 
"bridgesync0004.sql" -//go:embed bridgesync0005.sql -var mig0005 string + sqlBytes, err := migrationFS.ReadFile(name) + if err != nil { + panic(err) + } -//go:embed bridgesync0006.sql -var mig0006 string + id := strings.TrimSuffix(name, ".sql") // "bridgesync0004" -//go:embed bridgesync0007.sql -var mig0007 string + migrations = append(migrations, types.Migration{ + ID: id, + SQL: string(sqlBytes), + }) + } -//go:embed bridgesync0008.sql -var mig0008 string + // Ensure deterministic canonical order + sort.Slice(migrations, func(i, j int) bool { + return migrations[i].ID < migrations[j].ID + }) +} -//go:embed bridgesync0009.sql -var mig0009 string +func RunMigrations(dbPath string) error { + // Allocate slice with exact capacity to avoid reallocations when combining migrations + total := len(migrations) + len(treemigrations.Migrations) -//go:embed bridgesync0010.sql -var mig0010 string + combined := make([]types.Migration, 0, total) + // Copy migrations + combined = append(combined, migrations...) + combined = append(combined, treemigrations.Migrations...) -//go:embed bridgesync0011.sql -var mig0011 string + // Pass the copy to db.RunMigrations + return db.RunMigrations(dbPath, combined) +} -//go:embed bridgesync0012.sql -var mig0012 string +// GetUpTo returns all migrations up to and including the migration with the given ID. +func GetUpTo(lastID string) []types.Migration { + idx := sort.Search(len(migrations), func(i int) bool { + return migrations[i].ID > lastID + }) -func RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - { - ID: "bridgesync0004", - SQL: mig0004, - }, - { - ID: "bridgesync0005", - SQL: mig0005, - }, - { - ID: "bridgesync0006", - SQL: mig0006, - }, - { - ID: "bridgesync0007", - SQL: mig0007, - }, - { - ID: "bridgesync0008", - SQL: mig0008, - }, - { - ID: "bridgesync0009", - SQL: mig0009, - }, - { - ID: "bridgesync0010", - SQL: mig0010, - }, - { - ID: "bridgesync0011", - SQL: mig0011, - }, - { - ID: "bridgesync0012", - SQL: mig0012, - }, - } - migrations = append(migrations, treeMigrations.Migrations...) 
- return db.RunMigrations(dbPath, migrations) + out := make([]types.Migration, idx) + copy(out, migrations[:idx]) + return out } diff --git a/bridgesync/migrations/migrations_test.go b/bridgesync/migrations/migrations_test.go index e8217c2bc..a4728a4d7 100644 --- a/bridgesync/migrations/migrations_test.go +++ b/bridgesync/migrations/migrations_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/agglayer/aggkit/db" - "github.com/agglayer/aggkit/db/types" "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" migrate "github.com/rubenv/sql-migrate" @@ -266,23 +265,10 @@ func TestMigration0004(t *testing.T) { require.NoError(t, err) defer database.Close() - // Define migrations up to 0003 - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - } + // Define migrations up to bridgesync0003 + migrations := GetUpTo("bridgesync0003") - // Run migrations up to 0003 (3 migrations) + // Run migrations up to bridgesync0003 (3 migrations) err = db.RunMigrationsDBExtended(log.GetDefaultLogger(), database, migrations, migrate.Up, 3) require.NoError(t, err) @@ -458,29 +444,8 @@ func TestMigration0006(t *testing.T) { require.NoError(t, err) defer database.Close() - // Define migrations up to 0005 - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig0001, - }, - { - ID: "bridgesync0002", - SQL: mig0002, - }, - { - ID: "bridgesync0003", - SQL: mig0003, - }, - { - ID: "bridgesync0004", - SQL: mig0004, - }, - { - ID: "bridgesync0005", - SQL: mig0005, - }, - } + // Define migrations up to bridgesync0005 + migrations := GetUpTo("bridgesync0005") // Run migrations up to 0005 (5 migrations) err = db.RunMigrationsDBExtended(log.GetDefaultLogger(), database, migrations, migrate.Up, 5) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 9d77de337..e517088d9 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -51,6 +51,9 @@ const ( // setClaimTableName is the name of the table that stores set claim events setClaimTableName = "set_claim" + // backwardLETTableName is the name of the table that stores backward local exit tree events + backwardLETTableName = "backward_let" + // nilStr holds nil string nilStr = "nil" ) @@ -119,6 +122,13 @@ var ( `, bridgeTableName) ) +type BridgeSource string + +const ( + BridgeSourceBackwardLET BridgeSource = "backward_let" + BridgeSourceForwardLET BridgeSource = "forward_let" +) + // Bridge is the representation of a bridge event type Bridge struct { BlockNum uint64 `meddler:"block_num"` @@ -135,6 +145,7 @@ type Bridge struct { Metadata []byte `meddler:"metadata"` DepositCount uint32 `meddler:"deposit_count"` TxnSender common.Address `meddler:"txn_sender,address"` + Source BridgeSource `meddler:"source"` ToAddress common.Address `meddler:"to_address,address"` } @@ -146,11 +157,11 @@ func (b *Bridge) String() string { return fmt.Sprintf("Bridge{BlockNum: %d, BlockPos: %d, FromAddress: %s, TxHash: %s, "+ "BlockTimestamp: %d, LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x, "+ - "DepositCount: %d, TxnSender: %s, ToAddress: %s}", + "DepositCount: %d, TxnSender: %s, Source: %s, ToAddress: %s}", b.BlockNum, b.BlockPos, b.FromAddress.String(), b.TxHash.String(), b.BlockTimestamp, b.LeafType, b.OriginNetwork, b.OriginAddress.String(), b.DestinationNetwork, b.DestinationAddress.String(), amountStr, b.Metadata, - 
b.DepositCount, b.TxnSender.String(), b.ToAddress.String()) + b.DepositCount, b.TxnSender.String(), b.Source, b.ToAddress.String()) } // Hash returns the hash of the bridge event as expected by the exit tree @@ -347,55 +358,6 @@ func (c *Claim) decodePreEtrogCalldata(data []any) (bool, error) { return true, nil } -type InvalidClaim struct { - // claim struct fields - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - TxHash common.Hash `meddler:"tx_hash,hash"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` - DestinationAddress common.Address `meddler:"destination_address"` - Amount *big.Int `meddler:"amount,bigint"` - ProofLocalExitRoot types.Proof `meddler:"proof_local_exit_root,merkleproof"` - ProofRollupExitRoot types.Proof `meddler:"proof_rollup_exit_root,merkleproof"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` - DestinationNetwork uint32 `meddler:"destination_network"` - Metadata []byte `meddler:"metadata"` - IsMessage bool `meddler:"is_message"` - BlockTimestamp uint64 `meddler:"block_timestamp"` - // additional fields - Reason string `meddler:"reason"` - CreatedAt uint64 `meddler:"created_at"` -} - -// NewInvalidClaim creates a new InvalidClaim from a Claim and a reason -func NewInvalidClaim(c *Claim, reason string) *InvalidClaim { - return &InvalidClaim{ - BlockNum: c.BlockNum, - BlockPos: c.BlockPos, - TxHash: c.TxHash, - GlobalIndex: c.GlobalIndex, - OriginNetwork: c.OriginNetwork, - OriginAddress: c.OriginAddress, - DestinationAddress: c.DestinationAddress, - Amount: c.Amount, - ProofLocalExitRoot: c.ProofLocalExitRoot, - ProofRollupExitRoot: c.ProofRollupExitRoot, - MainnetExitRoot: c.MainnetExitRoot, - RollupExitRoot: c.RollupExitRoot, - GlobalExitRoot: c.GlobalExitRoot, - DestinationNetwork: c.DestinationNetwork, - Metadata: c.Metadata, - IsMessage: c.IsMessage, - BlockTimestamp: c.BlockTimestamp, - Reason: reason, - CreatedAt: uint64(time.Now().UTC().Unix()), - } -} - // TokenMapping representation of a NewWrappedToken event, that is emitted by the bridge contract type TokenMapping struct { BlockNum uint64 `meddler:"block_num"` @@ -485,7 +447,7 @@ func (u *UnsetClaim) String() string { } // SetClaim representation of a SetClaim event, -// that is emitted by the bridge contract when a claim is set. +// that is emitted by the L2 bridge contract when a claim is set. type SetClaim struct { BlockNum uint64 `meddler:"block_num"` BlockPos uint64 `meddler:"block_pos"` @@ -505,6 +467,32 @@ func (s *SetClaim) String() string { globalIndexStr, s.CreatedAt) } +// BackwardLET representation of a BackwardLET event, +// that is emitted by the L2 bridge contract when a LET is rolled back. +type BackwardLET struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + PreviousDepositCount *big.Int `meddler:"previous_deposit_count,bigint"` + PreviousRoot common.Hash `meddler:"previous_root,hash"` + NewDepositCount *big.Int `meddler:"new_deposit_count,bigint"` + NewRoot common.Hash `meddler:"new_root,hash"` +} + +// String returns a formatted string representation of BackwardLET for debugging and logging. 
+func (b *BackwardLET) String() string { + previousDepositCountStr := nilStr + if b.PreviousDepositCount != nil { + previousDepositCountStr = b.PreviousDepositCount.String() + } + newDepositCountStr := nilStr + if b.NewDepositCount != nil { + newDepositCountStr = b.NewDepositCount.String() + } + return fmt.Sprintf("BackwardLET{BlockNum: %d, BlockPos: %d, "+ + "PreviousDepositCount: %s, PreviousRoot: %s, NewDepositCount: %s, NewRoot: %s}", + b.BlockNum, b.BlockPos, previousDepositCountStr, b.PreviousRoot.String(), newDepositCountStr, b.NewRoot.String()) +} + // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { Bridge *Bridge @@ -514,6 +502,7 @@ type Event struct { RemoveLegacyToken *RemoveLegacyToken UnsetClaim *UnsetClaim SetClaim *SetClaim + BackwardLET *BackwardLET } func (e Event) String() string { @@ -539,6 +528,9 @@ func (e Event) String() string { if e.SetClaim != nil { parts = append(parts, e.SetClaim.String()) } + if e.BackwardLET != nil { + parts = append(parts, e.BackwardLET.String()) + } return "Event{" + strings.Join(parts, ", ") + "}" } @@ -562,6 +554,7 @@ func (b BridgeSyncRuntimeData) String() string { } return res } + func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) error { tmp := sync.RuntimeData{ ChainID: b.ChainID, @@ -581,7 +574,7 @@ func (b BridgeSyncRuntimeData) IsCompatible(storage BridgeSyncRuntimeData) error type processor struct { syncerID string db *sql.DB - exitTree *tree.AppendOnlyTree + exitTree types.FullTreer log *log.Logger mu mutex.RWMutex halted bool @@ -1255,22 +1248,58 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { } }() - blocksRes, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) + // --------------------------------------------------------------------- + // 1. Load affected deposit counts and BackwardLETs BEFORE deleting blocks, bridges and BackwardLET entries + // --------------------------------------------------------------------- + backwardLETsQuery := ` + SELECT previous_deposit_count, new_deposit_count + FROM backward_let + WHERE block_num >= $1` + var backwardLETs []*BackwardLET + if err := meddler.QueryAll(tx, &backwardLETs, backwardLETsQuery, firstReorgedBlock); err != nil { + return fmt.Errorf("failed to retrieve the affected backward LETs: %w", err) + } + + var depositCountsToRemove map[uint32]struct{} + if len(backwardLETs) > 0 { + depositCountsToRemove, err = loadReorgedDepositCounts(tx, firstReorgedBlock) + if err != nil { + p.log.Errorf("failed to retrieve reorged bridges: %v", err) + return err + } + } + + // --------------------------------------------------------- + // 2. Delete blocks (cascade delete everything else) + // --------------------------------------------------------- + blocksRes, err := tx.Exec(`DELETE FROM block WHERE num >= $1`, firstReorgedBlock) if err != nil { p.log.Errorf("failed to delete blocks during reorg: %v", err) return err } + rowsAffected, err := blocksRes.RowsAffected() if err != nil { p.log.Errorf("failed to get rows affected during reorg: %v", err) return err } - if err = p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { + // --------------------------------------------------------- + // 3. 
Reorg exit tree to clean state + // --------------------------------------------------------- + if err := p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { p.log.Errorf("failed to reorg exit tree: %v", err) return err } + // --------------------------------------------------------- + // 4. Restore bridges removed by BackwardLET + // --------------------------------------------------------- + err = p.restoreBackwardLETBridges(tx, backwardLETs, depositCountsToRemove) + if err != nil { + return err + } + if err = tx.Commit(); err != nil { p.log.Errorf("failed to commit reorg transaction: %v", err) return err @@ -1287,6 +1316,91 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return nil } +// restoreBackwardLETBridges restores bridges that were previously removed by BackwardLET events +func (p *processor) restoreBackwardLETBridges(tx dbtypes.Txer, backwardLETs []*BackwardLET, + removedDepositCounts map[uint32]struct{}) error { + restoreQuery := ` + SELECT * + FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2 + ORDER BY deposit_count ASC + ` + + for _, backwardLET := range backwardLETs { + prev, err := aggkitcommon.SafeUint64(backwardLET.PreviousDepositCount) + if err != nil { + return fmt.Errorf("invalid previous deposit count: %w", err) + } + + next, err := aggkitcommon.SafeUint64(backwardLET.NewDepositCount) + if err != nil { + return fmt.Errorf("invalid new deposit count: %w", err) + } + + var bridges []*Bridge + if err := meddler.QueryAll(tx, &bridges, restoreQuery, next, prev); err != nil { + return err + } + + for _, b := range bridges { + if _, ok := removedDepositCounts[b.DepositCount]; ok { + // skip cascade-deleted bridges (prevent from restoring them) + continue + } + + // reset source + b.Source = "" + if err := meddler.Insert(tx, bridgeTableName, b); err != nil { + return err + } + + leaf := types.Leaf{ + Index: b.DepositCount, + Hash: b.Hash(), + } + if _, err := p.exitTree.PutLeaf(tx, b.BlockNum, b.BlockPos, leaf); err != nil { + return err + } + } + + // cleanup bridge_archive + if _, err := tx.Exec(` + DELETE FROM bridge_archive + WHERE deposit_count > $1 AND deposit_count <= $2 + `, next, prev); err != nil { + return err + } + } + + return nil +} + +// loadReorgedDepositCounts retrieves the bridges that are going to be deleted by the reorg, +// and returns its deposit counts. +// The bridges are retrieved from the bridge_archive table, because in case there were BackwardLET events, +// they would have already deleted the bridges from bridge table. 
+func loadReorgedDepositCounts(tx dbtypes.Txer, fromBlock uint64) (map[uint32]struct{}, error) { + rows, err := tx.Query(` + SELECT deposit_count + FROM bridge_archive + WHERE block_num >= $1 + `, fromBlock) + if err != nil { + return nil, err + } + defer rows.Close() + + result := make(map[uint32]struct{}) + for rows.Next() { + var depositCount uint32 + if err := rows.Scan(&depositCount); err != nil { + return nil, err + } + result[depositCount] = struct{}{} + } + return result, nil +} + // ProcessBlock process the events of the block to build the exit tree // and updates the last processed block (can be called without events for that purpose) func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { @@ -1382,6 +1496,34 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } } + + if event.BackwardLET != nil { + newDepositCount, leafIndex, err := normalizeDepositCount(event.BackwardLET.NewDepositCount) + if err != nil { + return err + } + + // 1. archive and remove all the bridges whose + // deposit_count is greater than the one captured by the BackwardLET event + err = p.archiveAndDeleteBridgesAbove(ctx, tx, newDepositCount) + if err != nil { + return fmt.Errorf("failed to delete bridges above deposit count %d: %w", + newDepositCount, err) + } + + // 2. remove all leafs from the exit tree with indices greater than leafIndex in the exit tree + if err := p.exitTree.BackwardToIndex(ctx, tx, leafIndex); err != nil { + p.log.Errorf("failed to backward local exit tree to leaf index %d (deposit count: %d)", + leafIndex, newDepositCount) + return err + } + + // 3. insert the backward let event to designated table + if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { + p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) + return err + } + } } if err := tx.Commit(); err != nil { @@ -1410,6 +1552,65 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return nil } +// normalizeDepositCount checks whether given depositCount can fit into the uint64 and uint32 and downcasts it. +// Otherwise it returns an error. +func normalizeDepositCount(depositCount *big.Int) (uint64, uint32, error) { + u64, err := aggkitcommon.SafeUint64(depositCount) + if err != nil { + return 0, 0, fmt.Errorf("invalid deposit count: %w", err) + } + + u32, err := aggkitcommon.SafeUint32(u64) + if err != nil { + return 0, 0, fmt.Errorf("invalid deposit count: %w", err) + } + + return u64, u32, nil +} + +// archiveAndDeleteBridgesAbove archives and removes all the bridges whose depositCount is greater than the provided one +func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes.Txer, depositCount uint64) error { + // 1. Load candidates + query := fmt.Sprintf(`SELECT * FROM %s WHERE deposit_count > $1`, bridgeTableName) + var bridges []*Bridge + if err := meddler.QueryAll(tx, &bridges, query, depositCount); err != nil { + return err + } + + if len(bridges) == 0 { + return nil + } + + deletedDepositCounts := make([]uint32, 0, len(bridges)) + // 2. Archive + for _, b := range bridges { + b.Source = BridgeSourceBackwardLET + if err := meddler.Insert(tx, "bridge_archive", b); err != nil { + return err + } + deletedDepositCounts = append(deletedDepositCounts, b.DepositCount) + } + + // 3. 
Delete originals + deleteQuery := fmt.Sprintf(` + DELETE FROM %s + WHERE deposit_count > $1`, + bridgeTableName) + + _, err := tx.ExecContext(ctx, deleteQuery, depositCount) + if err != nil { + return err + } + + if len(deletedDepositCounts) > 0 { + p.log.Debugf("BackwardLET archived + removed %d bridges with deposit_count > %d: %v", + len(deletedDepositCounts), depositCount, deletedDepositCounts, + ) + } + + return nil +} + // GetTotalNumberOfRecords returns the total number of records in the given table func (p *processor) GetTotalNumberOfRecords(ctx context.Context, tableName, whereClause string) (int, error) { if !tableNameRegex.MatchString(tableName) { diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index ebd9a16b2..0d8f798f5 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "math/big" "os" "path" @@ -20,6 +21,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/aggchain-multisig/polygonzkevmbridge" bridgetypes "github.com/agglayer/aggkit/bridgeservice/types" "github.com/agglayer/aggkit/bridgesync/migrations" + bridgesynctypes "github.com/agglayer/aggkit/bridgesync/types" "github.com/agglayer/aggkit/db" "github.com/agglayer/aggkit/log" "github.com/agglayer/aggkit/sync" @@ -320,6 +322,18 @@ func TestProcessor(t *testing.T) { eventsToClaims(block5.Events), )), }, + &reorgAction{ + p: p, + description: "reorg the last block", + firstReorgedBlock: 5, + }, + &getLastProcessedBlockAction{ + p: p, + description: "after last block reorged", + ctx: context.Background(), + expectedLastProcessedBlock: 4, + expectedErr: nil, + }, } for _, a := range actions { @@ -339,13 +353,13 @@ var ( Event{Bridge: &Bridge{ BlockNum: 1, BlockPos: 0, - LeafType: 1, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 1, - OriginAddress: common.HexToAddress("01"), + OriginAddress: common.HexToAddress("1"), DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("01"), + DestinationAddress: common.HexToAddress("1"), Amount: big.NewInt(1), - Metadata: common.Hex2Bytes("01"), + Metadata: common.Hex2Bytes("1"), DepositCount: 0, }}, Event{Claim: &Claim{ @@ -353,8 +367,8 @@ var ( BlockPos: 1, GlobalIndex: big.NewInt(1), OriginNetwork: 1, - OriginAddress: common.HexToAddress("01"), - DestinationAddress: common.HexToAddress("01"), + OriginAddress: common.HexToAddress("1"), + DestinationAddress: common.HexToAddress("1"), Amount: big.NewInt(1), MainnetExitRoot: common.Hash{}, }}, @@ -390,27 +404,39 @@ var ( Event{Bridge: &Bridge{ BlockNum: 3, BlockPos: 0, - LeafType: 2, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 2, - OriginAddress: common.HexToAddress("02"), + OriginAddress: common.HexToAddress("2"), DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("02"), + DestinationAddress: common.HexToAddress("2"), Amount: big.NewInt(2), - Metadata: common.Hex2Bytes("02"), + Metadata: common.Hex2Bytes("2"), DepositCount: 1, }}, Event{Bridge: &Bridge{ BlockNum: 3, BlockPos: 1, - LeafType: 3, + LeafType: bridgesynctypes.LeafTypeAsset.Uint8(), OriginNetwork: 3, - OriginAddress: common.HexToAddress("03"), + OriginAddress: common.HexToAddress("3"), DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("03"), + DestinationAddress: common.HexToAddress("3"), Amount: big.NewInt(0), - Metadata: common.Hex2Bytes("03"), + Metadata: common.Hex2Bytes("3"), DepositCount: 2, }}, + Event{Bridge: &Bridge{ + BlockNum: 3, + BlockPos: 2, + LeafType: 
bridgesynctypes.LeafTypeAsset.Uint8(), + OriginNetwork: 3, + OriginAddress: common.HexToAddress("4"), + DestinationNetwork: 3, + DestinationAddress: common.HexToAddress("4"), + Amount: big.NewInt(0), + Metadata: common.Hex2Bytes("4"), + DepositCount: 3, + }}, }, } block4 = sync.Block{ @@ -453,6 +479,12 @@ var ( BlockPos: 3, LegacyTokenAddress: common.HexToAddress("0x11"), }}, + Event{BackwardLET: &BackwardLET{ + BlockNum: 5, + BlockPos: 4, + PreviousDepositCount: big.NewInt(3), + NewDepositCount: big.NewInt(2), + }}, }, } ) @@ -996,10 +1028,6 @@ func TestGetBridgesPaged(t *testing.T) { } require.NoError(t, tx.Commit()) - depositCountPtr := func(i uint64) *uint64 { - return &i - } - testCases := []struct { name string pageSize uint32 @@ -1054,7 +1082,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t4", pageSize: 1, page: 1, - depositCount: depositCountPtr(1), + depositCount: uint64Ptr(1), expectedCount: 1, expectedBridges: []*Bridge{bridges[1]}, expectedError: "", @@ -1063,7 +1091,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t5", pageSize: 3, page: 2, - depositCount: depositCountPtr(1), + depositCount: uint64Ptr(1), expectedCount: 0, expectedBridges: []*Bridge{}, expectedError: "invalid page number for given page size and total number of bridges", @@ -1081,7 +1109,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t7", pageSize: 1, page: 1, - depositCount: depositCountPtr(0), + depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -1109,7 +1137,7 @@ func TestGetBridgesPaged(t *testing.T) { name: "t9", pageSize: 6, page: 1, - depositCount: depositCountPtr(3), + depositCount: uint64Ptr(3), networkIDs: []uint32{ bridges[0].DestinationNetwork, bridges[2].DestinationNetwork, @@ -1124,7 +1152,7 @@ func TestGetBridgesPaged(t *testing.T) { pageSize: 1, page: 1, fromAddress: "0xE34aaF64b29273B7D567FCFc40544c014EEe9970", - depositCount: depositCountPtr(0), + depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -1134,7 +1162,7 @@ func TestGetBridgesPaged(t *testing.T) { pageSize: 1, page: 1, fromAddress: "0xe34aaF64b29273B7D567FCFc40544c014EEe9970", - depositCount: depositCountPtr(0), + depositCount: uint64Ptr(0), expectedCount: 1, expectedBridges: []*Bridge{bridges[0]}, expectedError: "", @@ -2358,9 +2386,6 @@ func TestGetClaimsByGlobalIndex_Compact(t *testing.T) { oldProof := types.Proof{} oldProof[0] = common.HexToHash("0x01") - newProof := types.Proof{} - newProof[0] = common.HexToHash("0x02") - testCases := []struct { name string globalIndex *big.Int @@ -2722,6 +2747,10 @@ func intPtr(i int) *int { return &i } +func uint64Ptr(i uint64) *uint64 { + return &i +} + func TestProcessor_ErrorPathLogging(t *testing.T) { t.Parallel() @@ -5319,3 +5348,348 @@ func TestClaimColumnsSQL_ReflectionCheck(t *testing.T) { require.True(t, ok, "Missing SQL column for meddler-tag '%s'", col) } } + +func TestProcessor_BackwardLET(t *testing.T) { + buildBlocksWithSequentialBridges := func(blocksCount, bridgesPerBlock uint64, + blockNumOffset uint64, depositCountOffset uint32) []sync.Block { + blocks := make([]sync.Block, 0, blocksCount) + depositCount := depositCountOffset + for i := range blocksCount { + blockNum := i + 1 + blockNumOffset + block := sync.Block{ + Num: blockNum, + Hash: common.HexToHash(fmt.Sprintf("%x", blockNum)), + } + for blockPos := range bridgesPerBlock { + block.Events = append(block.Events, + Event{Bridge: &Bridge{ + BlockNum: blockNum, + BlockPos: blockPos, + 
DepositCount: depositCount, + }}) + + depositCount++ + } + + blocks = append(blocks, block) + } + return blocks + } + + collectExpectedBridgesUpTo := func(t *testing.T, blocks []sync.Block, + skipBlocks []uint64, targetDepositCount uint32) []Bridge { + t.Helper() + + bridges := make([]Bridge, 0) + for _, b := range blocks { + if slices.Contains(skipBlocks, b.Num) { + continue + } + + for _, e := range b.Events { + evt, ok := e.(Event) + require.True(t, ok) + if evt.Bridge != nil { + bridges = append(bridges, *evt.Bridge) + if evt.Bridge.DepositCount == targetDepositCount { + return bridges + } + } + } + } + return bridges + } + + testCases := []struct { + name string + setupBlocks func() []sync.Block + firstReorgedBlock *uint64 + targetDepositCount uint32 + skipBlocks []uint64 + archivedDepositCounts []uint32 + processBlockErrMsg string + }{ + { + name: "backward let after a couple of bridges", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(3), + NewDepositCount: big.NewInt(2), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 2, + archivedDepositCounts: []uint32{3}, + }, + { + name: "backward let event with all the bridges, except the first one", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(0), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 0, + archivedDepositCounts: []uint32{1, 2, 3, 4, 5}, + }, + { + name: "backward let event (only the last bridge)", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(4), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + + return blocks + }, + targetDepositCount: 4, + archivedDepositCounts: []uint32{5}, + }, + { + name: "backward let event in the middle of bridges", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(2, 3, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + blocks = append(blocks, buildBlocksWithSequentialBridges(3, 2, uint64(len(blocks)), 3)...) 
+ + return blocks + }, + targetDepositCount: 8, + skipBlocks: []uint64{2, 3}, // all the bridges from these blocks were backwarded + archivedDepositCounts: []uint32{3, 4, 5}, + }, + { + name: "overlapping backward let events", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(3), + }}, + }, + }) + blocks = append(blocks, sync.Block{ + Num: uint64(len(blocks) + 2), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+2)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 2), + BlockPos: 0, + PreviousDepositCount: big.NewInt(4), + NewDepositCount: big.NewInt(3), + }}, + }, + }) + + return blocks + }, + targetDepositCount: 3, + archivedDepositCounts: []uint32{4, 5}, + }, + { + name: "backward let on empty bridge table", + setupBlocks: func() []sync.Block { + return []sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(6), + NewDepositCount: big.NewInt(3), + }}, + }, + }} + }, + targetDepositCount: 0, + }, + { + name: "backward let invalid new deposit count (outside of uint64 range)", + setupBlocks: func() []sync.Block { + return []sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(0), + NewDepositCount: big.NewInt(-3), + }}, + }, + }} + }, + processBlockErrMsg: "invalid deposit count: value=-3 does not fit in uint64", + }, + { + name: "backward let invalid new deposit count (outside of uint32 range)", + setupBlocks: func() []sync.Block { + return []sync.Block{ + { + Num: 1, + Hash: common.HexToHash("0x1"), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 1, + BlockPos: 0, + PreviousDepositCount: big.NewInt(0), + NewDepositCount: new(big.Int).SetUint64(uint64(math.MaxUint32) + 1), + }}, + }, + }} + }, + processBlockErrMsg: "invalid deposit count: value=4294967296 exceeds uint32 max", + }, + { + name: "backward let after a couple of bridges + reorg backward let", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(3, 2, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: 4, + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + + return blocks + }, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 3, + archivedDepositCounts: []uint32{3}, + }, + { + name: "backward let event in the middle of bridges + reorg backward let", + setupBlocks: func() []sync.Block { + blocks := buildBlocksWithSequentialBridges(2, 3, 0, 0) + backwardLETBlock := sync.Block{ + Num: uint64(len(blocks) + 1), + Hash: common.HexToHash(fmt.Sprintf("0x%x", len(blocks)+1)), + Events: []any{ + Event{BackwardLET: &BackwardLET{ + BlockNum: uint64(len(blocks) + 1), + BlockPos: 0, + PreviousDepositCount: big.NewInt(5), + NewDepositCount: big.NewInt(2), + }}, + }, + } + blocks = append(blocks, backwardLETBlock) + blocks = 
append(blocks, buildBlocksWithSequentialBridges(3, 2, uint64(len(blocks)), 3)...) + + return blocks + }, + firstReorgedBlock: uint64Ptr(3), + targetDepositCount: 5, + archivedDepositCounts: []uint32{3, 4, 5}, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "backward_let_cases.sqlite") + require.NoError(t, migrations.RunMigrations(dbPath)) + p, err := newProcessor(dbPath, "bridge-syncer", log.GetDefaultLogger(), dbQueryTimeout) + require.NoError(t, err) + + blocks := c.setupBlocks() + for _, b := range blocks { + err = p.ProcessBlock(t.Context(), b) + if c.processBlockErrMsg != "" { + require.ErrorContains(t, err, c.processBlockErrMsg) + } else { + require.NoError(t, err) + } + } + + if len(c.archivedDepositCounts) > 0 { + archivedBridgeQuery := ` + SELECT * FROM bridge_archive + WHERE deposit_count <= $1 + ORDER BY deposit_count ASC` + + maxDepositCount := slices.Max(c.archivedDepositCounts) + var archivedBridges []*Bridge + err = meddler.QueryAll(p.db, &archivedBridges, archivedBridgeQuery, maxDepositCount) + require.NoError(t, err) + + require.Len(t, archivedBridges, len(c.archivedDepositCounts)) + for i, b := range archivedBridges { + require.Equal(t, c.archivedDepositCounts[i], b.DepositCount) + require.Equal(t, BridgeSourceBackwardLET, b.Source) + } + } + + if c.firstReorgedBlock != nil { + err = p.Reorg(t.Context(), *c.firstReorgedBlock) + require.NoError(t, err) + } + + lastProcessedBlock, err := p.GetLastProcessedBlock(t.Context()) + require.NoError(t, err) + expectedBridges := collectExpectedBridgesUpTo(t, blocks, c.skipBlocks, c.targetDepositCount) + + actualBridges, err := p.GetBridges(t.Context(), 0, lastProcessedBlock) + require.NoError(t, err) + require.Equal(t, expectedBridges, actualBridges) + }) + } +} diff --git a/common/common.go b/common/common.go index 957b40a28..c1a45c6ea 100644 --- a/common/common.go +++ b/common/common.go @@ -3,6 +3,7 @@ package common import ( "crypto/ecdsa" "encoding/binary" + "errors" "fmt" "math" "math/big" @@ -186,3 +187,25 @@ func ParseUint64HexOrDecimal(str string) (uint64, error) { } return num, nil } + +// SafeUint64 converts big.Int into uint64, if it fits into it. +// Otherwise it returns an error. +func SafeUint64(i *big.Int) (uint64, error) { + if i == nil { + return 0, errors.New("value is undefined") + } + + if !i.IsUint64() { + return 0, fmt.Errorf("value=%v does not fit in uint64", i) + } + return i.Uint64(), nil +} + +// SafeUint32 downcasts the provided uint64 value to uint32, if it fits into it. +// Otherwise it returns an error. 
+func SafeUint32(v uint64) (uint32, error) { + if v > math.MaxUint32 { + return 0, fmt.Errorf("value=%d exceeds uint32 max (%d)", v, math.MaxUint32) + } + return uint32(v), nil +} diff --git a/common/common_test.go b/common/common_test.go index fdf9ba65d..520526d0a 100644 --- a/common/common_test.go +++ b/common/common_test.go @@ -512,3 +512,112 @@ func TestParseUint64HexOrDecimal(t *testing.T) { }) } } + +func TestSafeUint64(t *testing.T) { + tests := []struct { + name string + input *big.Int + want uint64 + expectErr bool + }{ + { + name: "nil value", + input: nil, + expectErr: true, + }, + { + name: "zero", + input: big.NewInt(0), + want: 0, + expectErr: false, + }, + { + name: "small positive number", + input: big.NewInt(42), + want: 42, + expectErr: false, + }, + { + name: "max uint64", + input: new(big.Int).SetUint64(math.MaxUint64), + want: math.MaxUint64, + expectErr: false, + }, + { + name: "negative value", + input: big.NewInt(-1), + expectErr: true, + }, + { + name: "overflow uint64", + input: new(big.Int).Add(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(1)), + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SafeUint64(tt.input) + + if tt.expectErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} + +func TestSafeUint32(t *testing.T) { + tests := []struct { + name string + input uint64 + want uint32 + expectErr bool + }{ + { + name: "zero", + input: 0, + want: 0, + expectErr: false, + }, + { + name: "small value", + input: 123, + want: 123, + expectErr: false, + }, + { + name: "max uint32", + input: math.MaxUint32, + want: math.MaxUint32, + expectErr: false, + }, + { + name: "just above max uint32", + input: uint64(math.MaxUint32) + 1, + expectErr: true, + }, + { + name: "max uint64", + input: math.MaxUint64, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := SafeUint32(tt.input) + + if tt.expectErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/db/mocks/mock_d_ber.go b/db/mocks/mock_d_ber.go index c60585aae..76f54538d 100644 --- a/db/mocks/mock_d_ber.go +++ b/db/mocks/mock_d_ber.go @@ -150,6 +150,76 @@ func (_c *DBer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql.Res return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *DBer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DBer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type DBer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *DBer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *DBer_ExecContext_Call { + return &DBer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *DBer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *DBer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *DBer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *DBer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DBer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *DBer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *DBer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_querier.go b/db/mocks/mock_querier.go index 719ef6a1d..5bf4dc754 100644 --- a/db/mocks/mock_querier.go +++ b/db/mocks/mock_querier.go @@ -91,6 +91,76 @@ func (_c *Querier_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql. return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *Querier) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Querier_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type Querier_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *Querier_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *Querier_ExecContext_Call { + return &Querier_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *Querier_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *Querier_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *Querier_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *Querier_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Querier_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *Querier_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *Querier) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_sql_txer.go b/db/mocks/mock_sql_txer.go index 4da1b269f..0730d62fb 100644 --- a/db/mocks/mock_sql_txer.go +++ b/db/mocks/mock_sql_txer.go @@ -136,6 +136,76 @@ func (_c *SQLTxer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql. return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *SQLTxer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SQLTxer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type SQLTxer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *SQLTxer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *SQLTxer_ExecContext_Call { + return &SQLTxer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *SQLTxer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *SQLTxer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *SQLTxer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *SQLTxer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SQLTxer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *SQLTxer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *SQLTxer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/mocks/mock_txer.go b/db/mocks/mock_txer.go index 510a56462..75e613ae9 100644 --- a/db/mocks/mock_txer.go +++ b/db/mocks/mock_txer.go @@ -202,6 +202,76 @@ func (_c *Txer_Exec_Call) RunAndReturn(run func(string, ...interface{}) (sql.Res return _c } +// ExecContext provides a mock function with given fields: ctx, query, args +func (_m *Txer) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, ctx, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ExecContext") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (sql.Result, error)); ok { + return rf(ctx, query, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) sql.Result); ok { + r0 = rf(ctx, query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Txer_ExecContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecContext' +type Txer_ExecContext_Call struct { + *mock.Call +} + +// ExecContext is a helper method to define mock.On call +// - ctx context.Context +// - query string +// - args ...interface{} +func (_e *Txer_Expecter) ExecContext(ctx interface{}, query interface{}, args ...interface{}) *Txer_ExecContext_Call { + return &Txer_ExecContext_Call{Call: _e.mock.On("ExecContext", + append([]interface{}{ctx, query}, args...)...)} +} + +func (_c *Txer_ExecContext_Call) Run(run func(ctx context.Context, query string, args ...interface{})) *Txer_ExecContext_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *Txer_ExecContext_Call) Return(_a0 sql.Result, _a1 error) *Txer_ExecContext_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Txer_ExecContext_Call) RunAndReturn(run func(context.Context, string, ...interface{}) (sql.Result, error)) *Txer_ExecContext_Call { + _c.Call.Return(run) + return _c +} + // Query provides a mock function with given fields: query, args func (_m *Txer) Query(query string, args ...interface{}) (*sql.Rows, error) { var _ca []interface{} diff --git a/db/types/interface.go b/db/types/interface.go index ae2b4aebe..ff54d3ac7 100644 --- a/db/types/interface.go +++ b/db/types/interface.go @@ -11,6 +11,7 @@ import ( // Implementations of this interface can be used to generalize database access logic. type Querier interface { Exec(query string, args ...interface{}) (sql.Result, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) Query(query string, args ...interface{}) (*sql.Rows, error) QueryRow(query string, args ...interface{}) *sql.Row QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) diff --git a/tree/tree.go b/tree/tree.go index 019d591c4..e685c3b0c 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -133,14 +133,11 @@ func (t *Tree) getRHTNode(tx dbtypes.Querier, nodeHash common.Hash) (*types.Tree } func (t *Tree) storeNodes(tx dbtypes.Txer, nodes []types.TreeNode) error { - for i := 0; i < len(nodes); i++ { - if err := meddler.Insert(tx, t.rhtTable, &nodes[i]); err != nil { - if sqliteErr, ok := db.SQLiteErr(err); ok { - if sqliteErr.ExtendedCode == db.UniqueConstrain { - // ignore repeated entries. 
This is likely to happen due to not - // cleaning RHT when reorg - continue - } + for _, node := range nodes { + if err := meddler.Insert(tx, t.rhtTable, &node); err != nil { + if sqliteErr, ok := db.SQLiteErr(err); ok && sqliteErr.ExtendedCode == db.UniqueConstrain { + // ignore repeated entries + continue } return err } @@ -246,12 +243,22 @@ func (t *Tree) Reorg(tx dbtypes.Txer, firstReorgedBlock uint64) error { return err } +// BackwardToIndex deletes all the roots with index higher than targetIndex +func (t *Tree) BackwardToIndex(ctx context.Context, tx dbtypes.Txer, targetIndex uint32) error { + _, err := tx.ExecContext( + ctx, + fmt.Sprintf(`DELETE FROM %s WHERE position > $1`, t.rootTable), + targetIndex, + ) + return err +} + // CalculateRoot calculates the Merkle Root based on the leaf and proof of inclusion func CalculateRoot(leafHash common.Hash, proof [types.DefaultHeight]common.Hash, index uint32) common.Hash { node := leafHash // Compute the Merkle root - for height := uint8(0); height < types.DefaultHeight; height++ { + for height := range types.DefaultHeight { if (index>>height)&1 == 1 { node = crypto.Keccak256Hash(proof[height].Bytes(), node.Bytes()) } else { diff --git a/tree/tree_test.go b/tree/tree_test.go index 2021214b8..771b1eb55 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -306,6 +306,114 @@ func TestVerifyProof(t *testing.T) { } } +func TestTree_BackwardToIndex(t *testing.T) { + t.Parallel() + ctx := context.Background() + + t.Run("deletes roots with index higher than targetIndex", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + // Add 8 leaves (roots with indices 0..7) + putTestLeaves(t, tree, treeDB, 8, 0) + + // Confirm all roots exist + for i := range 8 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + // Delete roots with index > 4 + require.NoError(t, tree.BackwardToIndex(ctx, tx, 4)) + require.NoError(t, tx.Commit()) + + // Roots with index 0..4 should exist + for i := 0; i <= 4; i++ { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + + // Roots with index 5..7 should not exist + for i := 5; i < 8; i++ { + _, err := tree.GetRootByIndex(ctx, uint32(i)) + require.Error(t, err) + require.ErrorIs(t, err, db.ErrNotFound) + } + + // Add more leaves to confirm tree is still functional + putTestLeaves(t, tree, treeDB, 3, 5) // adding leaves with indices 5,6,7 + + // Confirm new roots exist + for i := range 8 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + }) + + t.Run("no roots deleted if none above targetIndex", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + putTestLeaves(t, tree, treeDB, 3, 0) + + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + require.NoError(t, tree.BackwardToIndex(ctx, tx, 10)) + require.NoError(t, tx.Commit()) + + // All roots should still exist + for i := range 3 { + root, err := tree.GetRootByIndex(ctx, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint32(i), root.Index) + } + }) + + t.Run("handles empty table gracefully", func(t *testing.T) { + t.Parallel() + + treeDB := createTreeDBForTest(t) + tree := NewAppendOnlyTree(treeDB, "") + + tx, 
diff --git a/tree/tree_test.go b/tree/tree_test.go
index 2021214b8..771b1eb55 100644
--- a/tree/tree_test.go
+++ b/tree/tree_test.go
@@ -306,6 +306,114 @@ func TestVerifyProof(t *testing.T) {
 	}
 }
 
+func TestTree_BackwardToIndex(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+
+	t.Run("deletes roots with index higher than targetIndex", func(t *testing.T) {
+		t.Parallel()
+
+		treeDB := createTreeDBForTest(t)
+		tree := NewAppendOnlyTree(treeDB, "")
+
+		// Add 8 leaves (roots with indices 0..7)
+		putTestLeaves(t, tree, treeDB, 8, 0)
+
+		// Confirm all roots exist
+		for i := range 8 {
+			root, err := tree.GetRootByIndex(ctx, uint32(i))
+			require.NoError(t, err)
+			require.Equal(t, uint32(i), root.Index)
+		}
+
+		tx, err := db.NewTx(context.Background(), treeDB)
+		require.NoError(t, err)
+
+		// Delete roots with index > 4
+		require.NoError(t, tree.BackwardToIndex(ctx, tx, 4))
+		require.NoError(t, tx.Commit())
+
+		// Roots with index 0..4 should exist
+		for i := 0; i <= 4; i++ {
+			root, err := tree.GetRootByIndex(ctx, uint32(i))
+			require.NoError(t, err)
+			require.Equal(t, uint32(i), root.Index)
+		}
+
+		// Roots with index 5..7 should not exist
+		for i := 5; i < 8; i++ {
+			_, err := tree.GetRootByIndex(ctx, uint32(i))
+			require.Error(t, err)
+			require.ErrorIs(t, err, db.ErrNotFound)
+		}
+
+		// Add more leaves to confirm tree is still functional
+		putTestLeaves(t, tree, treeDB, 3, 5) // adding leaves with indices 5,6,7
+
+		// Confirm new roots exist
+		for i := range 8 {
+			root, err := tree.GetRootByIndex(ctx, uint32(i))
+			require.NoError(t, err)
+			require.Equal(t, uint32(i), root.Index)
+		}
+	})
+
+	t.Run("no roots deleted if none above targetIndex", func(t *testing.T) {
+		t.Parallel()
+
+		treeDB := createTreeDBForTest(t)
+		tree := NewAppendOnlyTree(treeDB, "")
+
+		putTestLeaves(t, tree, treeDB, 3, 0)
+
+		tx, err := db.NewTx(context.Background(), treeDB)
+		require.NoError(t, err)
+
+		require.NoError(t, tree.BackwardToIndex(ctx, tx, 10))
+		require.NoError(t, tx.Commit())
+
+		// All roots should still exist
+		for i := range 3 {
+			root, err := tree.GetRootByIndex(ctx, uint32(i))
+			require.NoError(t, err)
+			require.Equal(t, uint32(i), root.Index)
+		}
+	})
+
+	t.Run("handles empty table gracefully", func(t *testing.T) {
+		t.Parallel()
+
+		treeDB := createTreeDBForTest(t)
+		tree := NewAppendOnlyTree(treeDB, "")
+
+		tx, err := db.NewTx(context.Background(), treeDB)
+		require.NoError(t, err)
+
+		require.NoError(t, tree.BackwardToIndex(ctx, tx, 0))
+	})
+
+	t.Run("returns error on database failure", func(t *testing.T) {
+		t.Parallel()
+
+		dbPath := path.Join(t.TempDir(), "tree_BackwardToIndex_dberr.sqlite")
+		require.NoError(t, migrations.RunMigrations(dbPath))
+		treeDB, err := db.NewSQLiteDB(dbPath)
+		require.NoError(t, err)
+
+		// Intentionally invalid table name
+		tree := &Tree{
+			db:        treeDB,
+			rootTable: "nonexistent_table",
+		}
+
+		tx, err := db.NewTx(context.Background(), treeDB)
+		require.NoError(t, err)
+
+		err = tree.BackwardToIndex(ctx, tx, 0)
+		require.ErrorContains(t, err, "no such table")
+	})
+}
+
 func createTreeDBForTest(t *testing.T) *sql.DB {
 	t.Helper()
diff --git a/tree/types/interfaces.go b/tree/types/interfaces.go
index 997393559..670b71610 100644
--- a/tree/types/interfaces.go
+++ b/tree/types/interfaces.go
@@ -25,6 +25,7 @@ type LeafWriter interface {
 type ReorganizeTreer interface {
 	ReadTreer
 	Reorg(tx dbtypes.Txer, firstReorgedBlock uint64) error
+	BackwardToIndex(ctx context.Context, tx dbtypes.Txer, targetIndex uint32) error
 }
 
 // FullTreer = fully-capable tree (read, write, reorg)
diff --git a/tree/types/mocks/mock_full_treer.go b/tree/types/mocks/mock_full_treer.go
index 3ab36cb35..91187f9ff 100644
--- a/tree/types/mocks/mock_full_treer.go
+++ b/tree/types/mocks/mock_full_treer.go
@@ -27,6 +27,54 @@ func (_m *FullTreer) EXPECT() *FullTreer_Expecter {
 	return &FullTreer_Expecter{mock: &_m.Mock}
 }
 
+// BackwardToIndex provides a mock function with given fields: ctx, tx, targetIndex
+func (_m *FullTreer) BackwardToIndex(ctx context.Context, tx types.Txer, targetIndex uint32) error {
+	ret := _m.Called(ctx, tx, targetIndex)
+
+	if len(ret) == 0 {
+		panic("no return value specified for BackwardToIndex")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, types.Txer, uint32) error); ok {
+		r0 = rf(ctx, tx, targetIndex)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// FullTreer_BackwardToIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BackwardToIndex'
+type FullTreer_BackwardToIndex_Call struct {
+	*mock.Call
+}
+
+// BackwardToIndex is a helper method to define mock.On call
+//   - ctx context.Context
+//   - tx types.Txer
+//   - targetIndex uint32
+func (_e *FullTreer_Expecter) BackwardToIndex(ctx interface{}, tx interface{}, targetIndex interface{}) *FullTreer_BackwardToIndex_Call {
+	return &FullTreer_BackwardToIndex_Call{Call: _e.mock.On("BackwardToIndex", ctx, tx, targetIndex)}
+}
+
+func (_c *FullTreer_BackwardToIndex_Call) Run(run func(ctx context.Context, tx types.Txer, targetIndex uint32)) *FullTreer_BackwardToIndex_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(types.Txer), args[2].(uint32))
+	})
+	return _c
+}
+
+func (_c *FullTreer_BackwardToIndex_Call) Return(_a0 error) *FullTreer_BackwardToIndex_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *FullTreer_BackwardToIndex_Call) RunAndReturn(run func(context.Context, types.Txer, uint32) error) *FullTreer_BackwardToIndex_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetLastRoot provides a mock function with given fields: tx
 func (_m *FullTreer) GetLastRoot(tx types.Querier) (treetypes.Root, error) {
 	ret := _m.Called(tx)
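The regenerated mocks expose the usual mockery expecter API for the new method. A sketch of how a consuming test might stub it; the NewFullTreer constructor name follows mockery's convention and is assumed, as are the import paths:

package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	mocks "github.com/agglayer/aggkit/tree/types/mocks" // import path assumed
)

func TestStubBackwardToIndex(t *testing.T) {
	fullTree := mocks.NewFullTreer(t) // constructor name assumed from mockery conventions

	// Expect a single rollback to index 4, on any context and transaction.
	fullTree.EXPECT().
		BackwardToIndex(mock.Anything, mock.Anything, uint32(4)).
		Return(nil).
		Once()

	// Stand-in for the code under test calling the mocked dependency.
	require.NoError(t, fullTree.BackwardToIndex(context.Background(), nil, uint32(4)))
}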
diff --git a/tree/types/mocks/mock_reorganize_treer.go b/tree/types/mocks/mock_reorganize_treer.go
index cc97ea5a4..2f8b51d12 100644
--- a/tree/types/mocks/mock_reorganize_treer.go
+++ b/tree/types/mocks/mock_reorganize_treer.go
@@ -27,6 +27,54 @@ func (_m *ReorganizeTreer) EXPECT() *ReorganizeTreer_Expecter {
 	return &ReorganizeTreer_Expecter{mock: &_m.Mock}
 }
 
+// BackwardToIndex provides a mock function with given fields: ctx, tx, targetIndex
+func (_m *ReorganizeTreer) BackwardToIndex(ctx context.Context, tx types.Txer, targetIndex uint32) error {
+	ret := _m.Called(ctx, tx, targetIndex)
+
+	if len(ret) == 0 {
+		panic("no return value specified for BackwardToIndex")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, types.Txer, uint32) error); ok {
+		r0 = rf(ctx, tx, targetIndex)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// ReorganizeTreer_BackwardToIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BackwardToIndex'
+type ReorganizeTreer_BackwardToIndex_Call struct {
+	*mock.Call
+}
+
+// BackwardToIndex is a helper method to define mock.On call
+//   - ctx context.Context
+//   - tx types.Txer
+//   - targetIndex uint32
+func (_e *ReorganizeTreer_Expecter) BackwardToIndex(ctx interface{}, tx interface{}, targetIndex interface{}) *ReorganizeTreer_BackwardToIndex_Call {
+	return &ReorganizeTreer_BackwardToIndex_Call{Call: _e.mock.On("BackwardToIndex", ctx, tx, targetIndex)}
+}
+
+func (_c *ReorganizeTreer_BackwardToIndex_Call) Run(run func(ctx context.Context, tx types.Txer, targetIndex uint32)) *ReorganizeTreer_BackwardToIndex_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(types.Txer), args[2].(uint32))
+	})
+	return _c
+}
+
+func (_c *ReorganizeTreer_BackwardToIndex_Call) Return(_a0 error) *ReorganizeTreer_BackwardToIndex_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *ReorganizeTreer_BackwardToIndex_Call) RunAndReturn(run func(context.Context, types.Txer, uint32) error) *ReorganizeTreer_BackwardToIndex_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
 // GetLastRoot provides a mock function with given fields: tx
 func (_m *ReorganizeTreer) GetLastRoot(tx types.Querier) (treetypes.Root, error) {
 	ret := _m.Called(tx)