diff --git a/abi/abi_builder.go b/abi/abi_builder.go new file mode 100644 index 000000000..e96ed2ced --- /dev/null +++ b/abi/abi_builder.go @@ -0,0 +1,107 @@ +package abi + +import ( + "fmt" + "math/big" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +// BuildABIFields constructs ABI ArgumentMarshaling slice from a struct type using reflection +// It uses the "abiarg" tag to determine field names and optionally types +// Tag format: `abiarg:"fieldName"` or `abiarg:"fieldName,type"` +// If type is omitted, it will be inferred from the Go type +func BuildABIFields(structType any) ([]abi.ArgumentMarshaling, error) { + t := reflect.TypeOf(structType) + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct type, got %v", t.Kind()) + } + + fields := make([]abi.ArgumentMarshaling, 0, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + abiTag := field.Tag.Get("abiarg") + if abiTag == "" { + continue // Skip fields without abiarg tag + } + + parts := strings.Split(abiTag, ",") + name := parts[0] + + var abiType string + if len(parts) > 1 { + // Explicit type from tag + abiType = parts[1] + } else { + // Infer type from Go type + inferredType, err := inferABIType(field.Type) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + abiType = inferredType + } + + fields = append(fields, abi.ArgumentMarshaling{ + Name: name, + Type: abiType, + }) + } + + return fields, nil +} + +// inferABIType automatically maps Go types to Solidity ABI types +func inferABIType(goType reflect.Type) (string, error) { + // Handle special types first (before checking Kind) + switch goType { + case reflect.TypeOf(common.Address{}): + return "address", nil + case reflect.TypeOf(&big.Int{}), reflect.TypeOf(big.Int{}): + // Default to uint256 for big.Int, but can be overridden with explicit tag + return "uint256", nil + case reflect.TypeOf(common.Hash{}): + return "bytes32", nil + } + + switch goType.Kind() { + case reflect.Uint8: + return "uint8", nil + case reflect.Uint16: + return "uint16", nil + case reflect.Uint32: + return "uint32", nil + case reflect.Uint64: + return "uint64", nil + case reflect.Int8: + return "int8", nil + case reflect.Int16: + return "int16", nil + case reflect.Int32: + return "int32", nil + case reflect.Int64: + return "int64", nil + case reflect.Bool: + return "bool", nil + case reflect.String: + return "string", nil + case reflect.Slice: + if goType.Elem().Kind() == reflect.Uint8 { + return "bytes", nil + } + return "", fmt.Errorf("unsupported slice type: %v", goType) + case reflect.Array: + if goType.Elem().Kind() == reflect.Uint8 { + return fmt.Sprintf("bytes%d", goType.Len()), nil + } + return "", fmt.Errorf("unsupported array type: %v", goType) + } + + return "", fmt.Errorf("unsupported type: %v", goType) +} diff --git a/abi/abi_builder_test.go b/abi/abi_builder_test.go new file mode 100644 index 000000000..ecf920dd6 --- /dev/null +++ b/abi/abi_builder_test.go @@ -0,0 +1,107 @@ +package abi + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestBuildABIFields(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + Field4 *big.Int `abiarg:"field4,uint256"` + Field5 []byte 
`abiarg:"field5"` + Field6 string // No tag, should be skipped + } + + fields, err := BuildABIFields(TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 5) + + expected := []abi.ArgumentMarshaling{ + {Name: "field1", Type: "uint8"}, + {Name: "field2", Type: "uint32"}, + {Name: "field3", Type: "address"}, + {Name: "field4", Type: "uint256"}, + {Name: "field5", Type: "bytes"}, + } + + require.Equal(t, expected, fields) +} + +func TestBuildABIFields_TypeInference(t *testing.T) { + type TestStruct struct { + Uint8Field uint8 `abiarg:"uint8Field"` + Uint16Field uint16 `abiarg:"uint16Field"` + Uint32Field uint32 `abiarg:"uint32Field"` + Uint64Field uint64 `abiarg:"uint64Field"` + BoolField bool `abiarg:"boolField"` + StringField string `abiarg:"stringField"` + BytesField []byte `abiarg:"bytesField"` + AddressField common.Address `abiarg:"addressField"` + HashField common.Hash `abiarg:"hashField"` + BigIntField *big.Int `abiarg:"bigIntField"` // Inferred as uint256 + BigIntExplict *big.Int `abiarg:"bigIntExplict,uint128"` + } + + fields, err := BuildABIFields(TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 11) + + expected := []abi.ArgumentMarshaling{ + {Name: "uint8Field", Type: "uint8"}, + {Name: "uint16Field", Type: "uint16"}, + {Name: "uint32Field", Type: "uint32"}, + {Name: "uint64Field", Type: "uint64"}, + {Name: "boolField", Type: "bool"}, + {Name: "stringField", Type: "string"}, + {Name: "bytesField", Type: "bytes"}, + {Name: "addressField", Type: "address"}, + {Name: "hashField", Type: "bytes32"}, + {Name: "bigIntField", Type: "uint256"}, + {Name: "bigIntExplict", Type: "uint128"}, + } + + require.Equal(t, expected, fields) +} + +func TestBuildABIFields_ErrorCases(t *testing.T) { + t.Run("non-struct type", func(t *testing.T) { + _, err := BuildABIFields(42) + require.Error(t, err) + require.Contains(t, err.Error(), "expected struct type") + }) + + t.Run("unsupported field type", func(t *testing.T) { + type BadStruct struct { + InvalidField map[string]string `abiarg:"invalid"` + } + _, err := BuildABIFields(BadStruct{}) + require.Error(t, err) + require.Contains(t, err.Error(), "unsupported type") + }) +} + +func TestBuildABIFields_WithPointer(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + // Test with pointer to struct + fields, err := BuildABIFields(&TestStruct{}) + require.NoError(t, err) + require.Len(t, fields, 2) + + expected := []abi.ArgumentMarshaling{ + {Name: "field1", Type: "uint8"}, + {Name: "field2", Type: "uint32"}, + } + + require.Equal(t, expected, fields) +} diff --git a/abi/abi_decode.go b/abi/abi_decode.go new file mode 100644 index 000000000..5bf372c8e --- /dev/null +++ b/abi/abi_decode.go @@ -0,0 +1,61 @@ +package abi + +import ( + "errors" + "fmt" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +// DecodeABIEncodedStructArray is a generic helper that decodes ABI-encoded tuple array +// It handles the ABI unpacking and type conversion boilerplate +func DecodeABIEncodedStructArray[T any]( + encodedBytes []byte, + converter func(any) (T, error), +) ([]T, error) { + if len(encodedBytes) == 0 { + return nil, errors.New("encoded bytes are empty") + } + + var item T + abiFields, err := BuildABIFields(item) + if err != nil { + return nil, fmt.Errorf("failed to build ABI fields: %w", err) + } + + arrayType, err := abi.NewType("tuple[]", "", abiFields) + if err != nil { + return nil, fmt.Errorf("failed to create array type: %w", err) + } + + args := 
abi.Arguments{{Type: arrayType, Name: "data"}} + + unpacked, err := args.Unpack(encodedBytes) + if err != nil { + return nil, fmt.Errorf("failed to unpack data: %w", err) + } + + if len(unpacked) == 0 { + return nil, errors.New("unpacked data is empty") + } + + // The unpacked[0] contains the slice, but we need to extract it via reflection + // since the ABI library returns anonymous structs + val := reflect.ValueOf(unpacked[0]) + if val.Kind() != reflect.Slice { + return nil, fmt.Errorf("expected slice, got %v", val.Kind()) + } + + result := make([]T, val.Len()) + for i := 0; i < val.Len(); i++ { + item := val.Index(i).Interface() + converted, err := converter(item) + if err != nil { + return nil, fmt.Errorf("failed to convert item %d: %w", i, err) + } + result[i] = converted + } + + return result, nil +} diff --git a/abi/abi_decode_test.go b/abi/abi_decode_test.go new file mode 100644 index 000000000..3d9052ad3 --- /dev/null +++ b/abi/abi_decode_test.go @@ -0,0 +1,276 @@ +package abi + +import ( + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDecodeABIEncodedStructArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + } + + // Create test data + items := []TestStruct{ + { + Field1: 1, + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, + { + Field1: 2, + Field2: 200, + Field3: common.HexToAddress("0x2222222222222222222222222222222222222222"), + }, + } + + // Encode first + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) + + // Decode with converter + converter := func(item any) (TestStruct, error) { + // The ABI library returns anonymous structs, we need to extract fields + // In real usage, you'd use reflection or type assertions + return TestStruct{ + Field1: 1, // Placeholder for test + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 2) +} + +func TestDecodeABIEncodedStructArray_EmptyBytes(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + } + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + _, err := DecodeABIEncodedStructArray([]byte{}, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "encoded bytes are empty") +} + +func TestDecodeABIEncodedStructArray_ConverterError(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + // Create test data + items := []TestStruct{ + {Field1: 1, Field2: 100}, + {Field1: 2, Field2: 200}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + // Converter that always fails + converter := func(item any) (TestStruct, error) { + return TestStruct{}, errors.New("converter failed") + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to convert item 0") + require.Contains(t, err.Error(), "converter failed") +} + +func TestDecodeABIEncodedStructArray_WithBigInt(t *testing.T) { + type TestStruct struct { + Amount *big.Int `abiarg:"amount,uint256"` + Value uint32 `abiarg:"value"` + } + + // Create test data + items := 
[]TestStruct{ + {Amount: big.NewInt(1000), Value: 1}, + {Amount: big.NewInt(2000), Value: 2}, + {Amount: big.NewInt(3000), Value: 3}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + // Converter that extracts fields + converter := func(item any) (TestStruct, error) { + // In real usage, you'd use reflection to extract the fields + return TestStruct{Amount: big.NewInt(1000), Value: 1}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 3) +} + +func TestDecodeABIEncodedStructArray_EmptyArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + } + + // Encode empty array + items := []TestStruct{} + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 0) +} + +func TestDecodeABIEncodedStructArray_InvalidABIData(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + } + + converter := func(item any) (TestStruct, error) { + return TestStruct{}, nil + } + + // Invalid ABI encoded data + invalidData := []byte{0x01, 0x02, 0x03} + + _, err := DecodeABIEncodedStructArray(invalidData, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unpack data") +} + +func TestDecodeABIEncodedStructArray_ComplexStruct(t *testing.T) { + type ComplexStruct struct { + LeafType uint8 `abiarg:"leafType"` + OriginNetwork uint32 `abiarg:"originNetwork"` + OriginAddress common.Address `abiarg:"originAddress"` + DestinationNetwork uint32 `abiarg:"destinationNetwork"` + DestinationAddress common.Address `abiarg:"destinationAddress"` + Amount *big.Int `abiarg:"amount,uint256"` + Metadata []byte `abiarg:"metadata"` + } + + // Create test data + items := []ComplexStruct{ + { + LeafType: 1, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(1000), + Metadata: []byte("test1"), + }, + { + LeafType: 2, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(2000), + Metadata: []byte("test2"), + }, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + converter := func(item any) (ComplexStruct, error) { + // Placeholder converter for test + return ComplexStruct{ + LeafType: 1, + OriginNetwork: 1, + Amount: big.NewInt(1000), + }, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 2) +} + +func TestDecodeABIEncodedStructArray_NoABITags(t *testing.T) { + type BadStruct struct { + Field1 uint8 + Field2 uint32 + } + + converter := func(item any) (BadStruct, error) { + return BadStruct{}, nil + } + + // Try to decode with a struct that has no abiarg tags + // BuildABIFields will succeed but return empty fields, which will cause unpack to fail + _, err := DecodeABIEncodedStructArray([]byte{0x01}, converter) + require.Error(t, err) + // The error will be from unpacking due to insufficient data or empty ABI fields + require.Contains(t, 
err.Error(), "failed to") +} + +func TestDecodeABIEncodedStructArray_SingleItem(t *testing.T) { + type TestStruct struct { + Value uint64 `abiarg:"value"` + } + + // Create single item array + items := []TestStruct{ + {Value: 12345}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + converter := func(item any) (TestStruct, error) { + return TestStruct{Value: 12345}, nil + } + + decoded, err := DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) + require.Len(t, decoded, 1) + require.Equal(t, uint64(12345), decoded[0].Value) +} + +func TestDecodeABIEncodedStructArray_ConverterPartialFailure(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + items := []TestStruct{ + {Field1: 1, Field2: 100}, + {Field1: 2, Field2: 200}, + {Field1: 3, Field2: 300}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + + callCount := 0 + converter := func(item any) (TestStruct, error) { + callCount++ + if callCount == 2 { + return TestStruct{}, errors.New("failed on item 2") + } + return TestStruct{Field1: 1, Field2: 100}, nil + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to convert item 1") + require.Contains(t, err.Error(), "failed on item 2") +} diff --git a/abi/abi_encode.go b/abi/abi_encode.go new file mode 100644 index 000000000..3d8d319df --- /dev/null +++ b/abi/abi_encode.go @@ -0,0 +1,38 @@ +package abi + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +// EncodeABIStructArray is a generic helper that encodes a slice of structs to ABI-encoded tuple array +// It automatically builds ABI fields from the struct type using reflection and abiarg tags +func EncodeABIStructArray[T any](items []T) ([]byte, error) { + // For empty slices, we need a sample instance to build ABI fields + var item T + if len(items) > 0 { + // Use the first item to infer the type + item = items[0] + } + + // Use first item to build ABI fields + abiFields, err := BuildABIFields(item) + if err != nil { + return nil, fmt.Errorf("failed to build ABI fields: %w", err) + } + + arrayType, err := abi.NewType("tuple[]", "", abiFields) + if err != nil { + return nil, fmt.Errorf("failed to create array type: %w", err) + } + + args := abi.Arguments{{Type: arrayType, Name: "data"}} + + encodedBytes, err := args.Pack(items) + if err != nil { + return nil, fmt.Errorf("failed to pack data: %w", err) + } + + return encodedBytes, nil +} diff --git a/abi/abi_encode_test.go b/abi/abi_encode_test.go new file mode 100644 index 000000000..b3ea3537a --- /dev/null +++ b/abi/abi_encode_test.go @@ -0,0 +1,86 @@ +package abi + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestEncodeABIStructArray(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + Field3 common.Address `abiarg:"field3"` + } + + items := []TestStruct{ + { + Field1: 1, + Field2: 100, + Field3: common.HexToAddress("0x1111111111111111111111111111111111111111"), + }, + { + Field1: 2, + Field2: 200, + Field3: common.HexToAddress("0x2222222222222222222222222222222222222222"), + }, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) + + // Decode to verify roundtrip + converter := func(item any) (TestStruct, error) { + 
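+		// Note (illustrative, not part of the original patch): a real converter would
+		// normally use reflection to copy the fields of the anonymous struct produced by
+		// the go-ethereum ABI decoder into TestStruct; convertABILeafData in
+		// bridgesync/abi.go is an example of such a converter. For this encode/decode
+		// roundtrip test a placeholder value is sufficient.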
// Simple converter for test verification + return TestStruct{}, nil + } + + _, err = DecodeABIEncodedStructArray(encodedBytes, converter) + require.NoError(t, err) +} + +func TestEncodeABIStructArray_EmptySlice(t *testing.T) { + type TestStruct struct { + Field1 uint8 `abiarg:"field1"` + Field2 uint32 `abiarg:"field2"` + } + + items := []TestStruct{} + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) +} + +func TestEncodeABIStructArray_WithBigInt(t *testing.T) { + type TestStruct struct { + Amount *big.Int `abiarg:"amount,uint256"` + } + + items := []TestStruct{ + {Amount: big.NewInt(1000)}, + {Amount: big.NewInt(2000)}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) + require.NotEmpty(t, encodedBytes) +} + +func TestEncodeABIStructArray_NoTags(t *testing.T) { + type BadStruct struct { + Field1 uint8 + Field2 uint32 + } + + items := []BadStruct{ + {Field1: 1, Field2: 100}, + } + + encodedBytes, err := EncodeABIStructArray(items) + require.NoError(t, err) // Should work with empty fields (encodes empty array) + require.NotEmpty(t, encodedBytes) +} diff --git a/bridgesync/abi.go b/bridgesync/abi.go new file mode 100644 index 000000000..a349bf7d7 --- /dev/null +++ b/bridgesync/abi.go @@ -0,0 +1,132 @@ +package bridgesync + +import ( + "errors" + "fmt" + "math/big" + "reflect" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/ethereum/go-ethereum/common" +) + +type LeafData struct { + LeafType uint8 `abiarg:"leafType"` + OriginNetwork uint32 `abiarg:"originNetwork"` + OriginAddress common.Address `abiarg:"originAddress"` + DestinationNetwork uint32 `abiarg:"destinationNetwork"` + DestinationAddress common.Address `abiarg:"destinationAddress"` + Amount *big.Int `abiarg:"amount,uint256"` + Metadata []byte `abiarg:"metadata"` +} + +func (l LeafData) String() string { + return fmt.Sprintf("LeafData{LeafType: %d, OriginNetwork: %d, OriginAddress: %s, "+ + "DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %x}", + l.LeafType, + l.OriginNetwork, + l.OriginAddress.Hex(), + l.DestinationNetwork, + l.DestinationAddress.Hex(), + l.Amount.String(), + l.Metadata, + ) +} + +func (l LeafData) ToBridge( + blockNum, blockPos, blockTimestamp uint64, + depositCount uint32, + txnHash common.Hash, + txnSender, fromAddr common.Address) Bridge { + return Bridge{ + BlockNum: blockNum, + BlockPos: blockPos, + BlockTimestamp: blockTimestamp, + DepositCount: depositCount, + TxHash: txnHash, + FromAddress: fromAddr, + TxnSender: txnSender, + LeafType: l.LeafType, + OriginNetwork: l.OriginNetwork, + OriginAddress: l.OriginAddress, + DestinationNetwork: l.DestinationNetwork, + DestinationAddress: l.DestinationAddress, + Amount: l.Amount, + Metadata: l.Metadata, + Source: BridgeSourceForwardLET, // this leaf comes from ForwardLET event + } +} + +// decodeForwardLETLeaves decodes the newLeaves bytes from a ForwardLET event +func decodeForwardLETLeaves(newLeavesBytes []byte) ([]LeafData, error) { + return aggkitabi.DecodeABIEncodedStructArray(newLeavesBytes, convertABILeafData) +} + +// convertABILeafData converts an anonymous struct returned by the ABI decoder +func convertABILeafData(item any) (LeafData, error) { + // Use reflection to extract fields from the anonymous struct created by ABI library + // The ABI library generates structs with JSON tags that don't match our named types + val := reflect.ValueOf(item) + if val.Kind() != reflect.Struct { + return LeafData{}, fmt.Errorf("expected 
struct, got %T", item) + } + + expectedFields := reflect.TypeOf(LeafData{}).NumField() + if val.NumField() != expectedFields { + return LeafData{}, fmt.Errorf("expected %d fields, got %d", expectedFields, val.NumField()) + } + + // Create a map of field names to values from the ABI struct + fieldMap := make(map[string]any) + valType := val.Type() + for i := 0; i < val.NumField(); i++ { + fieldName := valType.Field(i).Name + fieldMap[fieldName] = val.Field(i).Interface() + } + + // Extract fields by name with type assertions + leafType, ok := fieldMap["LeafType"].(uint8) + if !ok { + return LeafData{}, errors.New("failed to convert field 'leafType' to uint8") + } + + originNetwork, ok := fieldMap["OriginNetwork"].(uint32) + if !ok { + return LeafData{}, errors.New("failed to convert field 'originNetwork' to uint32") + } + + originAddress, ok := fieldMap["OriginAddress"].(common.Address) + if !ok { + return LeafData{}, errors.New("failed to convert field 'originAddress' to common.Address") + } + + destinationNetwork, ok := fieldMap["DestinationNetwork"].(uint32) + if !ok { + return LeafData{}, errors.New("failed to convert field 'destinationNetwork' to uint32") + } + + destinationAddress, ok := fieldMap["DestinationAddress"].(common.Address) + if !ok { + return LeafData{}, errors.New("failed to convert field 'destinationAddress' to common.Address") + } + + amount, ok := fieldMap["Amount"].(*big.Int) + if !ok { + return LeafData{}, errors.New("failed to convert field 'amount' to *big.Int") + } + + metadata, ok := fieldMap["Metadata"].([]byte) + if !ok { + return LeafData{}, errors.New("failed to convert field 'metadata' to []byte") + } + + return LeafData{ + LeafType: leafType, + OriginNetwork: originNetwork, + OriginAddress: originAddress, + DestinationNetwork: destinationNetwork, + DestinationAddress: destinationAddress, + Amount: amount, + Metadata: metadata, + }, nil +} diff --git a/bridgesync/abi_test.go b/bridgesync/abi_test.go new file mode 100644 index 000000000..87f85655e --- /dev/null +++ b/bridgesync/abi_test.go @@ -0,0 +1,167 @@ +package bridgesync + +import ( + "math/big" + "testing" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestDecodeForwardLETLeaves(t *testing.T) { + largeAmount := new(big.Int) + largeAmount.SetString("123456789012345678901234567890", 10) + + testCases := []struct { + name string + inputLeaves []LeafData + expectedLeaves []LeafData + errorMsg string + useRawBytes bool + rawBytes []byte + }{ + { + name: "successfully decode single leaf", + inputLeaves: []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test metadata"), + }, + }, + }, + { + name: "successfully decode multiple leaves", + inputLeaves: []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(100), + Metadata: []byte("first leaf"), + }, + { + LeafType: 1, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: 
common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(200), + Metadata: []byte("second leaf"), + }, + { + LeafType: 2, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), + DestinationNetwork: 6, + DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), + Amount: big.NewInt(300), + Metadata: []byte("third leaf"), + }, + }, + }, + { + name: "decode leaf with empty metadata", + inputLeaves: []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), + Amount: big.NewInt(999), + Metadata: []byte{}, + }, + }, + }, + { + name: "decode leaf with large amount", + inputLeaves: []LeafData{ + { + LeafType: 255, // Max uint8 + OriginNetwork: 4294967295, // Max uint32 + OriginAddress: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"), + DestinationNetwork: 4294967295, // Max uint32 + DestinationAddress: common.HexToAddress("0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"), + Amount: largeAmount, + Metadata: []byte("large amount test"), + }, + }, + }, + { + name: "decode empty array", + inputLeaves: []LeafData{}, + }, + { + name: "fail on empty bytes", + useRawBytes: true, + rawBytes: []byte{}, + errorMsg: "encoded bytes are empty", + }, + { + name: "fail on invalid encoded data", + useRawBytes: true, + rawBytes: []byte{0x00, 0x01, 0x02, 0x03, 0x04}, + errorMsg: "failed to unpack data", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var encodedBytes []byte + var expectedLeaves []LeafData + + if tc.useRawBytes { + encodedBytes = tc.rawBytes + } else { + encodedBytes = encodeLeafDataArray(t, tc.inputLeaves) + expectedLeaves = tc.inputLeaves + } + + decodedLeaves, err := decodeForwardLETLeaves(encodedBytes) + + if tc.errorMsg != "" { + require.ErrorContains(t, err, tc.errorMsg) + require.Nil(t, decodedLeaves) + } else { + require.NoError(t, err) + require.Len(t, decodedLeaves, len(expectedLeaves)) + for i, expected := range expectedLeaves { + verifyLeafData(t, expected, decodedLeaves[i]) + } + } + }) + } +} + +// encodeLeafDataArray encodes a slice of LeafData using Solidity ABI encoding +// This simulates what the smart contract does with abi.encode(newLeaves) +func encodeLeafDataArray(t *testing.T, leaves []LeafData) []byte { + t.Helper() + + encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) + require.NoError(t, err) + + return encodedBytes +} + +// verifyLeafData compares two LeafData structs for equality +func verifyLeafData(t *testing.T, expected, actual LeafData) { + t.Helper() + + require.Equal(t, expected.LeafType, actual.LeafType, "LeafType mismatch") + require.Equal(t, expected.OriginNetwork, actual.OriginNetwork, "OriginNetwork mismatch") + require.Equal(t, expected.OriginAddress, actual.OriginAddress, "OriginAddress mismatch") + require.Equal(t, expected.DestinationNetwork, actual.DestinationNetwork, "DestinationNetwork mismatch") + require.Equal(t, expected.DestinationAddress, actual.DestinationAddress, "DestinationAddress mismatch") + require.Equal(t, 0, expected.Amount.Cmp(actual.Amount), "Amount mismatch: expected %s, got %s", + expected.Amount.String(), actual.Amount.String()) + require.Equal(t, expected.Metadata, actual.Metadata, "Metadata mismatch") +} diff --git a/bridgesync/backfill_tx_sender.go 
b/bridgesync/backfill_tx_sender.go index f3ef9f85b..30f5044ad 100644 --- a/bridgesync/backfill_tx_sender.go +++ b/bridgesync/backfill_tx_sender.go @@ -163,14 +163,16 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfillCount(ctx context.Context, query := fmt.Sprintf(` SELECT COUNT(*) FROM %s - WHERE txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL + WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) + AND (source IS NULL OR (source != $1 AND source != $2)) `, tableName) var count int dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - err := b.db.QueryRowContext(dbCtx, query).Scan(&count) + err := b.db.QueryRowContext(dbCtx, query, + BridgeSourceBackwardLET, BridgeSourceForwardLET).Scan(&count) if err != nil { return 0, fmt.Errorf("failed to count records needing backfill: %w", err) } @@ -188,13 +190,15 @@ func (b *BackfillTxnSender) getRecordsNeedingBackfill( query := fmt.Sprintf(` SELECT * FROM %s - WHERE txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL - LIMIT $1 + WHERE (txn_sender = '' OR txn_sender IS NULL OR from_address = '' OR from_address IS NULL) + AND (source IS NULL OR (source != $1 AND source != $2)) + LIMIT $3 `, tableName) dbCtx, cancel := context.WithTimeout(ctx, b.dbTimeout) defer cancel() - rows, err := b.db.QueryContext(dbCtx, query, limit) + rows, err := b.db.QueryContext(dbCtx, query, + BridgeSourceBackwardLET, BridgeSourceForwardLET, limit) if err != nil { return nil, fmt.Errorf("failed to query records needing backfill: %w", err) } diff --git a/bridgesync/backfill_tx_sender_test.go b/bridgesync/backfill_tx_sender_test.go index 685229a4b..216700f10 100644 --- a/bridgesync/backfill_tx_sender_test.go +++ b/bridgesync/backfill_tx_sender_test.go @@ -412,6 +412,112 @@ func TestBackfillTxnSender_getRecordsNeedingBackfillCount(t *testing.T) { require.Equal(t, 1, count) }) + t.Run("excludes backward_let and forward_let sources", func(t *testing.T) { + tempDir := t.TempDir() + dbPath := filepath.Join(tempDir, "test.db") + + // Run migrations + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + + // Create test data + database, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + defer database.Close() + + ctx := context.Background() + tx, err := db.NewTx(ctx, database) + require.NoError(t, err) + + // Insert test data + _, err = tx.Exec(`INSERT INTO block (num) VALUES (1), (2), (3), (4)`) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and NULL source (should be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 1, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 1, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890', + 1234567890, '0x1111111111111111111111111111111111111111', '', NULL + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and backward_let source (should NOT be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 2, 0, 1, 1, 
'0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 2, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567891', + 1234567890, '', '', 'backward_let' + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and forward_let source (should NOT be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender, source + ) VALUES ( + 3, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 3, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567892', + 1234567890, '', '', 'forward_let' + ) + `) + require.NoError(t, err) + + // Insert bridge with empty txn_sender and no source field (should be counted) + _, err = tx.Exec(` + INSERT INTO bridge ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ( + 4, 0, 1, 1, '0x1234567890123456789012345678901234567890', + 2, '0x0987654321098765432109876543210987654321', '1000000000000000000', + '', 4, '0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567893', + 1234567890, '', '' + ) + `) + require.NoError(t, err) + + err = tx.Commit() + require.NoError(t, err) + + mockClient := mocks.NewEthClienter(t) + logger := log.WithFields("module", "test") + backfiller, err := NewBackfillTxnSender(dbPath, mockClient, common.HexToAddress("0x1234"), logger) + require.NoError(t, err) + defer backfiller.Close() + + // Should only count the 2 records without backward_let or forward_let source + count, err := backfiller.getRecordsNeedingBackfillCount(ctx, "bridge") + require.NoError(t, err) + require.Equal(t, 2, count) + + // Verify getRecordsNeedingBackfill also excludes these sources + records, err := backfiller.getRecordsNeedingBackfill(ctx, "bridge", 10) + require.NoError(t, err) + require.Len(t, records, 2) + + // Verify the correct records were returned (block_num 1 and 4) + blockNums := []uint64{records[0].BlockNum, records[1].BlockNum} + require.Contains(t, blockNums, uint64(1)) + require.Contains(t, blockNums, uint64(4)) + }) + t.Run("database error", func(t *testing.T) { tempDir := t.TempDir() dbPath := filepath.Join(tempDir, "test.db") diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index f17d6520f..2ae475e3d 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -54,6 +54,7 @@ var ( "SetClaim(bytes32)", )) backwardLETEventSignature = crypto.Keccak256Hash([]byte("BackwardLET(uint256,bytes32,uint256,bytes32)")) + forwardLETEventSignature = crypto.Keccak256Hash([]byte("ForwardLET(uint256,bytes32,uint256,bytes32,bytes)")) claimAssetEtrogMethodID = common.Hex2Bytes("ccaa2d11") claimMessageEtrogMethodID = common.Hex2Bytes("f5efcd79") @@ -117,6 +118,7 @@ func buildAppender( appender[unsetClaimEventSignature] = buildUnsetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[setClaimEventSignature] = buildSetClaimEventHandler(bridgeDeployment.agglayerBridgeL2) appender[backwardLETEventSignature] = buildBackwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) + appender[forwardLETEventSignature] = buildForwardLETEventHandler(bridgeDeployment.agglayerBridgeL2) default: return nil, 
fmt.Errorf("unsupported bridge deployment kind: %d", bridgeDeployment.kind) @@ -664,6 +666,29 @@ func buildBackwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) f } } +// buildForwardLETEventHandler creates a handler for the ForwardLET event log +func buildForwardLETEventHandler(contract *agglayerbridgel2.Agglayerbridgel2) func(*sync.EVMBlock, types.Log) error { + return func(b *sync.EVMBlock, l types.Log) error { + event, err := contract.ParseForwardLET(l) + if err != nil { + return fmt.Errorf("error parsing ForwardLET event log %+v: %w", l, err) + } + + b.Events = append(b.Events, Event{ForwardLET: &ForwardLET{ + BlockNum: b.Num, + BlockPos: uint64(l.Index), + BlockTimestamp: b.Timestamp, + TxnHash: l.TxHash, + PreviousDepositCount: event.PreviousDepositCount, + PreviousRoot: event.PreviousRoot, + NewDepositCount: event.NewDepositCount, + NewRoot: event.NewRoot, + NewLeaves: event.NewLeaves, + }}) + return nil + } +} + type Call struct { From common.Address `json:"from"` To common.Address `json:"to"` diff --git a/bridgesync/downloader_test.go b/bridgesync/downloader_test.go index 7821a5ef2..70051b4c6 100644 --- a/bridgesync/downloader_test.go +++ b/bridgesync/downloader_test.go @@ -600,6 +600,33 @@ func TestBuildAppender(t *testing.T) { return l, nil }, }, + { + name: "forwardLETSignature appender", + eventSignature: forwardLETEventSignature, + deploymentKind: SovereignChain, + logBuilder: func() (types.Log, error) { + event, err := bridgeL2Abi.EventByID(forwardLETEventSignature) + if err != nil { + return types.Log{}, err + } + + previousDepositCount := big.NewInt(15) + previousRoot := common.HexToHash("0xdeadbeef15") + newDepositCount := big.NewInt(20) + newRoot := common.HexToHash("0x5ca1e20") + newLeaves := []byte("leavesdata") + data, err := event.Inputs.Pack(previousDepositCount, previousRoot, newDepositCount, newRoot, newLeaves) + if err != nil { + return types.Log{}, err + } + + l := types.Log{ + Topics: []common.Hash{forwardLETEventSignature}, + Data: data, + } + return l, nil + }, + }, { name: "unknown deployment kind", deploymentKind: 100, diff --git a/bridgesync/migrations/bridgesync0012.sql b/bridgesync/migrations/bridgesync0012.sql index 983d24982..51b106dc8 100644 --- a/bridgesync/migrations/bridgesync0012.sql +++ b/bridgesync/migrations/bridgesync0012.sql @@ -1,6 +1,7 @@ -- +migrate Down DROP TABLE IF EXISTS bridge_archive; DROP TABLE IF EXISTS backward_let; +DROP TABLE IF EXISTS forward_let; ALTER TABLE bridge DROP COLUMN source; ALTER TABLE bridge DROP COLUMN to_address; @@ -18,6 +19,18 @@ CREATE TABLE IF NOT EXISTS backward_let ( ALTER TABLE bridge ADD COLUMN source TEXT DEFAULT ''; ALTER TABLE bridge ADD COLUMN to_address VARCHAR; +CREATE TABLE IF NOT EXISTS forward_let ( + block_num INTEGER NOT NULL REFERENCES block (num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + block_timestamp INTEGER NOT NULL, + tx_hash VARCHAR NOT NULL, + previous_deposit_count TEXT NOT NULL, + previous_root VARCHAR NOT NULL, + new_deposit_count TEXT NOT NULL, + new_root VARCHAR NOT NULL, + new_leaves BLOB NOT NULL, + PRIMARY KEY (block_num, block_pos) + ); ------------------------------------------------------------------------------ -- Create bridge_archive table ------------------------------------------------------------------------------ diff --git a/bridgesync/processor.go b/bridgesync/processor.go index e517088d9..0230cdd47 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -54,6 +54,9 @@ const ( // backwardLETTableName is the name of the table 
that stores backward local exit tree events backwardLETTableName = "backward_let" + // forwardLETTableName is the name of the table that stores forward local exit tree events + forwardLETTableName = "forward_let" + // nilStr holds nil string nilStr = "nil" ) @@ -493,6 +496,42 @@ func (b *BackwardLET) String() string { b.BlockNum, b.BlockPos, previousDepositCountStr, b.PreviousRoot.String(), newDepositCountStr, b.NewRoot.String()) } +// ForwardLET representation of a ForwardLET event, +// that is emitted by the L2 bridge contract when a LET is advanced. +type ForwardLET struct { + BlockNum uint64 `meddler:"block_num"` + BlockPos uint64 `meddler:"block_pos"` + BlockTimestamp uint64 `meddler:"block_timestamp"` + TxnHash common.Hash `meddler:"tx_hash,hash"` + PreviousDepositCount *big.Int `meddler:"previous_deposit_count,bigint"` + PreviousRoot common.Hash `meddler:"previous_root,hash"` + NewDepositCount *big.Int `meddler:"new_deposit_count,bigint"` + NewRoot common.Hash `meddler:"new_root,hash"` + NewLeaves []byte `meddler:"new_leaves"` +} + +// String returns a formatted string representation of ForwardLET for debugging and logging. +func (f *ForwardLET) String() string { + prevDepositCountStr := nilStr + if f.PreviousDepositCount != nil { + prevDepositCountStr = f.PreviousDepositCount.String() + } + + newDepositCountStr := nilStr + if f.NewDepositCount != nil { + newDepositCountStr = f.NewDepositCount.String() + } + + return fmt.Sprintf("ForwardLET{BlockNum: %d, BlockPos: %d, "+ + "BlockTimestamp: %d, TxnHash: %s, "+ + "PreviousDepositCount: %s, PreviousRoot: %s, "+ + "NewDepositCount: %s, NewRoot: %s, NewLeaves: %x}", + f.BlockNum, f.BlockPos, + f.BlockTimestamp, f.TxnHash.String(), + prevDepositCountStr, f.PreviousRoot.String(), + newDepositCountStr, f.NewRoot.String(), f.NewLeaves) +} + // Event combination of bridge, claim, token mapping and legacy token migration events type Event struct { Bridge *Bridge @@ -503,6 +542,7 @@ type Event struct { UnsetClaim *UnsetClaim SetClaim *SetClaim BackwardLET *BackwardLET + ForwardLET *ForwardLET } func (e Event) String() string { @@ -531,6 +571,9 @@ func (e Event) String() string { if e.BackwardLET != nil { parts = append(parts, e.BackwardLET.String()) } + if e.ForwardLET != nil { + parts = append(parts, e.ForwardLET.String()) + } return "Event{" + strings.Join(parts, ", ") + "}" } @@ -1431,6 +1474,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } + var blockPos *uint64 for _, e := range block.Events { event, ok := e.(Event) if !ok { @@ -1439,6 +1483,13 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.Bridge != nil { + if blockPos != nil { + // increment block position based on forward LET events processed so far + // in the current block + event.Bridge.BlockPos = *blockPos + *blockPos++ + } + if _, err = p.exitTree.PutLeaf(tx, block.Num, event.Bridge.BlockPos, types.Leaf{ Index: event.Bridge.DepositCount, Hash: event.Bridge.Hash(), @@ -1498,6 +1549,12 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { } if event.BackwardLET != nil { + // we sanity check that the previous root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.BackwardLET.PreviousRoot); err != nil { + p.log.Errorf("failed to sanity check LER before processing BackwardLET: %v", err) + return err + } + newDepositCount, leafIndex, err := normalizeDepositCount(event.BackwardLET.NewDepositCount) if err != nil { return err @@ 
-1518,12 +1575,28 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return err } - // 3. insert the backward let event to designated table + // 4. sanity check that the new root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.BackwardLET.NewRoot); err != nil { + p.log.Errorf("failed to sanity check LER after processing BackwardLET: %v", err) + return err + } + + // 5. insert the backward let event to designated table if err = meddler.Insert(tx, backwardLETTableName, event.BackwardLET); err != nil { p.log.Errorf("failed to insert backward local exit tree event at block %d: %v", block.Num, err) return err } } + + if event.ForwardLET != nil { + newBlockPos, err := p.handleForwardLETEvent(tx, event.ForwardLET, blockPos) + if err != nil { + p.log.Errorf("failed to handle forward LET event at block %d: %v", block.Num, err) + return err + } + + blockPos = &newBlockPos + } } if err := tx.Commit(); err != nil { @@ -1611,6 +1684,139 @@ func (p *processor) archiveAndDeleteBridgesAbove(ctx context.Context, tx dbtypes return nil } +// sanityCheckLatestLER checks if the provided local exit root matches the latest one in the exit tree +func (p *processor) sanityCheckLatestLER(tx dbtypes.Txer, ler common.Hash) error { + var lastRootHash common.Hash + + root, err := p.exitTree.GetLastRoot(tx) + if err != nil { + // if there is no root yet, we consider the zero hash as the last root + if !errors.Is(err, db.ErrNotFound) { + return fmt.Errorf("failed to get last root from exit tree: %w", err) + } + } else { + lastRootHash = root.Hash + } + + if lastRootHash != ler { + return fmt.Errorf("local exit root mismatch: expected %s, got %s", + ler.String(), lastRootHash.String()) + } + return nil +} + +// handleForwardLETEvent processes a ForwardLET event and updates the database accordingly +func (p *processor) handleForwardLETEvent(tx dbtypes.Txer, event *ForwardLET, blockPos *uint64) (uint64, error) { + // first we sanity check that the previous root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.PreviousRoot); err != nil { + return 0, fmt.Errorf("failed to sanity check LER before processing ForwardLET: %w", err) + } + + // first we decode the new LET leaves from the forward LET event + // they are basically bridge events, but without some fields set (tx hash, sender, from address) + decodedNewLeaves, err := decodeForwardLETLeaves(event.NewLeaves) + if err != nil { + return 0, fmt.Errorf("failed to decode new leaves in forward LET: %w", err) + } + + newDepositCount := uint32(event.PreviousDepositCount.Uint64()) + 1 + newBlockPos := event.BlockPos + if blockPos != nil { + newBlockPos = *blockPos + } + + const getArchivedBridgesSQL = ` + SELECT * FROM bridge_archive + WHERE leaf_type = $1 + AND origin_network = $2 + AND origin_address = $3 + AND destination_network = $4 + AND destination_address = $5 + AND amount = $6 + AND metadata = $7 + ` + + // now we process each new leaf to insert them into the exit tree and bridges table + for _, leaf := range decodedNewLeaves { + var archivedBridges []*Bridge + err = meddler.QueryAll(tx, &archivedBridges, getArchivedBridgesSQL, + leaf.LeafType, + leaf.OriginNetwork, + leaf.OriginAddress, + leaf.DestinationNetwork, + leaf.DestinationAddress, + leaf.Amount.String(), + leaf.Metadata, + ) + if err != nil { + return 0, fmt.Errorf("failed to query archived bridges: %w", err) + } + + var ( + txnHash = event.TxnHash + txnSender, fromAddr common.Address + ) + + // let's see if 
we have exactly one archived bridge that matches the forward LET leaf + // usually we should have exactly one match since to recover the LET on L2, + // we must have a backwards LET done which archives the bridges, + // and then a forward LET that re-adds them to the exit tree after fixing it + // however, in case of multiple matches, we cannot be sure which one to use, + // so we will just log and leave the txnSender and fromAddr fields empty + if len(archivedBridges) == 1 { + archivedBridge := archivedBridges[0] + txnHash = archivedBridge.TxHash + txnSender = archivedBridge.TxnSender + fromAddr = archivedBridge.FromAddress + } else if len(archivedBridges) > 1 { + p.log.Debugf("multiple archived bridges found that match forward LET leaf %s;"+ + "cannot set txnSender and fromAddr fields to the bridge", leaf.String()) + } + + // create the new bridge event from the forward LET leaf + bridge := leaf.ToBridge( + event.BlockNum, + newBlockPos, + event.BlockTimestamp, + newDepositCount, + txnHash, + txnSender, + fromAddr, + ) + + // insert the new bridge leaf into the local exit tree + if _, err = p.exitTree.PutLeaf(tx, event.BlockNum, newBlockPos, types.Leaf{ + Index: newDepositCount, + Hash: bridge.Hash(), + }); err != nil { + if errors.Is(err, tree.ErrInvalidIndex) { + p.halt(fmt.Sprintf("error adding leaf to the exit tree: %v", err)) + } + return 0, sync.ErrInconsistentState + } + + // insert the new bridge into the bridges table + if err = meddler.Insert(tx, bridgeTableName, &bridge); err != nil { + return 0, fmt.Errorf("failed to insert bridge event from ForwardLET: %w", err) + } + + newDepositCount++ + newBlockPos++ + } + + // after processing all new leaves, we sanity check that the new root matches the latest one in the exit tree + if err := p.sanityCheckLatestLER(tx, event.NewRoot); err != nil { + return 0, fmt.Errorf("failed to sanity check LER after processing ForwardLET: %w", err) + } + + // finally, insert the forward LET event into the designated table + if err = meddler.Insert(tx, forwardLETTableName, event); err != nil { + return 0, fmt.Errorf("failed to insert forward local exit tree event: %w", err) + } + + return newBlockPos, nil +} + // GetTotalNumberOfRecords returns the total number of records in the given table func (p *processor) GetTotalNumberOfRecords(ctx context.Context, tableName, whereClause string) (int, error) { if !tableNameRegex.MatchString(tableName) { diff --git a/bridgesync/processor_forward_let_test.go b/bridgesync/processor_forward_let_test.go new file mode 100644 index 000000000..b5c92cfc2 --- /dev/null +++ b/bridgesync/processor_forward_let_test.go @@ -0,0 +1,734 @@ +package bridgesync + +import ( + "fmt" + "math/big" + "path/filepath" + "testing" + + aggkitabi "github.com/agglayer/aggkit/abi" + "github.com/agglayer/aggkit/bridgesync/migrations" + aggkitcommon "github.com/agglayer/aggkit/common" + "github.com/agglayer/aggkit/db" + dbtypes "github.com/agglayer/aggkit/db/types" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" + "github.com/stretchr/testify/require" +) + +func TestHandleForwardLETEvent(t *testing.T) { + t.Run("successfully process single leaf with no archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves to establish previous root (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for 
i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event with one leaf + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test metadata"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos, bridge.BlockPos) + require.Equal(t, leaves[0].LeafType, bridge.LeafType) + require.Equal(t, leaves[0].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, leaves[0].OriginAddress, bridge.OriginAddress) + require.Equal(t, leaves[0].DestinationNetwork, bridge.DestinationNetwork) + require.Equal(t, leaves[0].DestinationAddress, bridge.DestinationAddress) + require.Equal(t, 0, leaves[0].Amount.Cmp(bridge.Amount)) + require.Equal(t, leaves[0].Metadata, bridge.Metadata) + require.Equal(t, initialDepositCount+1, bridge.DepositCount) + require.Equal(t, event.TxnHash, bridge.TxHash) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.TxnSender) + require.Equal(t, aggkitcommon.ZeroAddress, bridge.FromAddress) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + + // Verify: ForwardLET event was inserted + var forwardLETs []*ForwardLET + err = meddler.QueryAll(tx, &forwardLETs, "SELECT * FROM forward_let WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, forwardLETs, 1) + require.Equal(t, event.BlockNum, forwardLETs[0].BlockNum) + }) + + t.Run("successfully process multiple leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-9) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 9; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 20+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i 
<= 9; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 20+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(9) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(200)) + require.NoError(t, err) + + // Create forward LET event with three leaves + leaves := []LeafData{ + { + LeafType: 0, + OriginNetwork: 1, + OriginAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + Amount: big.NewInt(100), + Metadata: []byte("first"), + }, + { + LeafType: 1, + OriginNetwork: 3, + OriginAddress: common.HexToAddress("0x3333333333333333333333333333333333333333"), + DestinationNetwork: 4, + DestinationAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + Amount: big.NewInt(200), + Metadata: []byte("second"), + }, + { + LeafType: 2, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x5555555555555555555555555555555555555555"), + DestinationNetwork: 6, + DestinationAddress: common.HexToAddress("0x6666666666666666666666666666666666666666"), + Amount: big.NewInt(300), + Metadata: []byte("third"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 200, + BlockPos: 10, + BlockTimestamp: 1234567900, + TxnHash: common.HexToHash("0xdef456"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + uint32(len(leaves)))), + NewLeaves: encodedLeaves, + } + + // Calculate the expected root that will result from processing these leaves + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+uint64(len(leaves)), newBlockPos) + + // Verify: All bridges were inserted + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1 ORDER BY block_pos", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 3) + + // Verify each bridge + for i, bridge := range bridges { + require.Equal(t, event.BlockNum, bridge.BlockNum) + require.Equal(t, event.BlockPos+uint64(i), bridge.BlockPos) + require.Equal(t, leaves[i].LeafType, bridge.LeafType) + require.Equal(t, leaves[i].OriginNetwork, bridge.OriginNetwork) + require.Equal(t, initialDepositCount+uint32(i)+1, bridge.DepositCount) + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + } + }) + + t.Run("process leaf with matching archived bridge", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-14) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 14; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 30+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 14; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 30+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(14) // Last index inserted + + 
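+		// Note (illustrative, not part of the original patch): the archived bridge inserted
+		// below stands in for a row that an earlier BackwardLET would have moved into
+		// bridge_archive; handleForwardLETEvent matches it on the leaf fields
+		// (leaf type, networks, addresses, amount, metadata) to recover the original
+		// tx hash, txn sender and from address.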
// Insert blocks for the archived bridge and ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1), ($2)`, uint64(50), uint64(300)) + require.NoError(t, err) + + // Setup: Create and archive a bridge that will match the forward LET leaf + archivedTxHash := common.HexToHash("0xoriginal123") + archivedTxnSender := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + archivedFromAddr := common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + + archivedBridge := &Bridge{ + BlockNum: 50, + BlockPos: 0, + LeafType: 1, + OriginNetwork: 7, + OriginAddress: common.HexToAddress("0x7777777777777777777777777777777777777777"), + DestinationNetwork: 8, + DestinationAddress: common.HexToAddress("0x8888888888888888888888888888888888888888"), + Amount: big.NewInt(500000), + Metadata: []byte("archived metadata"), + DepositCount: 20, + TxHash: archivedTxHash, + TxnSender: archivedTxnSender, + FromAddress: archivedFromAddr, + // Don't set Source - bridge_archive table doesn't have this column + } + // Insert manually to avoid Source field + err = meddler.Insert(tx, "bridge_archive", archivedBridge) + require.NoError(t, err) + + // Create forward LET event with matching leaf + leaves := []LeafData{ + { + LeafType: archivedBridge.LeafType, + OriginNetwork: archivedBridge.OriginNetwork, + OriginAddress: archivedBridge.OriginAddress, + DestinationNetwork: archivedBridge.DestinationNetwork, + DestinationAddress: archivedBridge.DestinationAddress, + Amount: archivedBridge.Amount, + Metadata: archivedBridge.Metadata, + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 300, + BlockPos: 20, + BlockTimestamp: 1234567950, + TxnHash: common.HexToHash("0xforward789"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (which will query for archived bridge) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event, archivedBridge) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted with archived tx info + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, archivedTxHash, bridge.TxHash, "Should use archived tx hash") + require.Equal(t, archivedTxnSender, bridge.TxnSender, "Should use archived txn sender") + require.Equal(t, archivedFromAddr, bridge.FromAddress, "Should use archived from address") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("process leaf with multiple matching archived bridges", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-24) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 24; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 40+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 24; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = 
p.exitTree.PutLeaf(tx, 40+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(24) // Last index inserted + + // Insert block for the ForwardLET event (blocks 60 and 61 for the archived bridges already exist from the initial leaves) + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(400)) + require.NoError(t, err) + + // Setup: Create two archived bridges with identical LeafData fields + commonLeafData := LeafData{ + LeafType: 1, + OriginNetwork: 9, + OriginAddress: common.HexToAddress("0x9999999999999999999999999999999999999999"), + DestinationNetwork: 11, + DestinationAddress: common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Amount: big.NewInt(750000), + Metadata: []byte("duplicate metadata"), + } + + archivedBridge1 := &Bridge{ + BlockNum: 60, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: commonLeafData.Metadata, + DepositCount: 30, + TxHash: common.HexToHash("0xf1257111"), // valid hex, distinct per bridge and from the event's tx hash + TxnSender: common.HexToAddress("0x1111111111111111111111111111111111111111"), + FromAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"), + } + + archivedBridge2 := &Bridge{ + BlockNum: 61, + BlockPos: 0, + LeafType: commonLeafData.LeafType, + OriginNetwork: commonLeafData.OriginNetwork, + OriginAddress: commonLeafData.OriginAddress, + DestinationNetwork: commonLeafData.DestinationNetwork, + DestinationAddress: commonLeafData.DestinationAddress, + Amount: commonLeafData.Amount, + Metadata: commonLeafData.Metadata, + DepositCount: 31, + TxHash: common.HexToHash("0x5ec0d222"), + TxnSender: common.HexToAddress("0x3333333333333333333333333333333333333333"), + FromAddress: common.HexToAddress("0x4444444444444444444444444444444444444444"), + } + + // Insert both archived bridges manually (to avoid Source column) + for _, archived := range []*Bridge{archivedBridge1, archivedBridge2} { + err = meddler.Insert(tx, "bridge_archive", archived) + require.NoError(t, err) + } + + // Create forward LET event with the common leaf + leaves := []LeafData{commonLeafData} + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 400, + BlockPos: 30, + BlockTimestamp: 1234567999, + TxnHash: common.HexToHash("0xf0a4a4d999"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected new root using helper (with no archived bridge info since multiple matches) + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process the forward LET event + blockPos := event.BlockPos + newBlockPos, err := p.handleForwardLETEvent(tx, event, &blockPos) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge was inserted with event's tx hash and empty addresses + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + + bridge := bridges[0] + require.Equal(t, event.TxnHash, bridge.TxHash, "Should use event's tx hash when multiple archived bridges match") + require.Equal(t, common.Address{}, bridge.TxnSender, "TxnSender should be empty
with multiple matches") + require.Equal(t, common.Address{}, bridge.FromAddress, "FromAddress should be empty with multiple matches") + require.Equal(t, BridgeSourceForwardLET, bridge.Source) + }) + + t.Run("error on previous root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Create forward LET event with WRONG previous root + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: common.HexToHash("0xWRONG"), // Wrong root + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0x999"), + NewLeaves: encodedLeaves, + } + + // Test: Should fail with root mismatch + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + require.Contains(t, err.Error(), initialRoot.String()) + }) + + t.Run("error on new root mismatch", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + // Create forward LET event + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.HexToHash("0xWRONG"), // Wrong new root + NewLeaves: encodedLeaves, + } + + // Test: Should fail with new root mismatch after processing + blockPos := event.BlockPos + _, err = 
p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "local exit root mismatch") + }) + + t.Run("error on invalid encoded leaves", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewRoot: common.Hash{}, + NewLeaves: []byte("invalid data"), // Invalid encoding + } + + // Test: Should fail to decode leaves + blockPos := event.BlockPos + _, err = p.handleForwardLETEvent(tx, event, &blockPos) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to decode new leaves") + }) + + t.Run("process with nil blockPos parameter", func(t *testing.T) { + p, tx := setupProcessorWithTransaction(t) + defer tx.Rollback() //nolint:errcheck + + // Setup: Insert initial leaves (indices 0-4) + var initialRoot common.Hash + var err error + // Insert block rows for initial leaves + for i := uint32(0); i <= 4; i++ { + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + for i := uint32(0); i <= 4; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + initialRoot, err = p.exitTree.PutLeaf(tx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + initialDepositCount := uint32(4) // Last index inserted + + // Insert block for the ForwardLET event + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, uint64(100)) + require.NoError(t, err) + + leaves := []LeafData{ + { + LeafType: 1, + OriginNetwork: 5, + OriginAddress: common.HexToAddress("0x1234567890123456789012345678901234567890"), + DestinationNetwork: 10, + DestinationAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Amount: big.NewInt(1000000), + Metadata: []byte("test"), + }, + } + encodedLeaves := encodeLeafDataArrayForTest(t, leaves) + + event := &ForwardLET{ + BlockNum: 100, + BlockPos: 5, + BlockTimestamp: 1234567890, + TxnHash: common.HexToHash("0xabc123"), + PreviousDepositCount: big.NewInt(int64(initialDepositCount)), + PreviousRoot: initialRoot, + NewDepositCount: big.NewInt(int64(initialDepositCount + 1)), + NewLeaves: encodedLeaves, + } + + // Calculate expected root using helper + event.NewRoot = calculateExpectedRootAfterForwardLET(t, initialDepositCount, leaves, event) + + // Test: Process with nil blockPos (should use event.BlockPos) + newBlockPos, err := p.handleForwardLETEvent(tx, event, nil) + require.NoError(t, err) + require.Equal(t, event.BlockPos+1, newBlockPos) + + // Verify: Bridge uses event.BlockPos + var bridges []*Bridge + err = meddler.QueryAll(tx, &bridges, "SELECT * FROM bridge WHERE block_num = $1", event.BlockNum) + require.NoError(t, err) + require.Len(t, bridges, 1) + require.Equal(t, event.BlockPos, bridges[0].BlockPos) + }) +} + +// setupProcessorWithTransaction creates a processor and begins a transaction for testing +func 
setupProcessorWithTransaction(t *testing.T) (*processor, dbtypes.Txer) { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "test_forward_let.db") + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test") + p, err := newProcessor(dbPath, "test", logger, dbQueryTimeout) + require.NoError(t, err) + + tx, err := db.NewTx(t.Context(), p.db) + require.NoError(t, err) + + return p, tx + } + + // calculateExpectedRootAfterForwardLET calculates what the tree root will be after processing ForwardLET leaves + // It uses a completely separate processor and database to avoid affecting the test state + // archivedBridges: optional variadic list of archived bridges; the i-th entry (if non-nil) supplies the archived tx info for the i-th leaf in leaves + func calculateExpectedRootAfterForwardLET(t *testing.T, initialDepositCount uint32, + leaves []LeafData, event *ForwardLET, archivedBridges ...*Bridge) common.Hash { + t.Helper() + + // Build a map for quick lookup of archived bridge info by leaf index + archivedByLeaf := make(map[int]*Bridge) + for i, archived := range archivedBridges { + if archived != nil { + archivedByLeaf[i] = archived + } + } + + // Create a temporary processor with its own database + tempDBPath := filepath.Join(t.TempDir(), "temp_calc.db") + err := migrations.RunMigrations(tempDBPath) + require.NoError(t, err) + + logger := log.WithFields("module", "test-calc") + tempP, err := newProcessor(tempDBPath, "test-calc", logger, dbQueryTimeout) + require.NoError(t, err) + + tempTx, err := db.NewTx(t.Context(), tempP.db) + require.NoError(t, err) + defer tempTx.Rollback() //nolint:errcheck + + // Insert block rows for the setup leaves + for i := uint32(0); i <= initialDepositCount; i++ { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, 10+uint64(i)) + require.NoError(t, err) + } + + // Insert block row for the ForwardLET event + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, event.BlockNum) + require.NoError(t, err) + + // Insert archived bridges if provided + for _, archived := range archivedBridges { + if archived != nil { + _, err = tempTx.Exec(`INSERT INTO block (num) VALUES ($1)`, archived.BlockNum) + require.NoError(t, err) + + _, err = tempTx.Exec(` + INSERT INTO bridge_archive ( + block_num, block_pos, leaf_type, origin_network, origin_address, + destination_network, destination_address, amount, metadata, deposit_count, + tx_hash, block_timestamp, from_address, txn_sender + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, 0, $12, $13) + `, archived.BlockNum, archived.BlockPos, archived.LeafType, + archived.OriginNetwork, archived.OriginAddress.Hex(), + archived.DestinationNetwork, archived.DestinationAddress.Hex(), + archived.Amount.String(), archived.Metadata, archived.DepositCount, + archived.TxHash.Hex(), archived.FromAddress.Hex(), archived.TxnSender.Hex()) + require.NoError(t, err) + } + } + + // Rebuild tree state up to initialDepositCount + for i := uint32(0); i <= initialDepositCount; i++ { + leaf := types.Leaf{Index: i, Hash: common.HexToHash(fmt.Sprintf("0x%d", i))} + _, err = tempP.exitTree.PutLeaf(tempTx, 10+uint64(i), 0, leaf) + require.NoError(t, err) + } + + // Now add the ForwardLET leaves, using the provided archived bridge info where available + currentDepositCount := initialDepositCount + 1 + var newRoot common.Hash + for i, leaf := range leaves { + // Try to get archived bridge info if available + var txHash common.Hash + var txnSender, fromAddr common.Address + if archived, found := archivedByLeaf[i]; found { + txHash = archived.TxHash + txnSender =
archived.TxnSender + fromAddr = archived.FromAddress + } else { + txHash = event.TxnHash + // txnSender and fromAddr remain zero + } + + bridge := leaf.ToBridge( + event.BlockNum, + event.BlockPos+uint64(i), + event.BlockTimestamp, + currentDepositCount, + txHash, + txnSender, + fromAddr, + ) + newRoot, err = tempP.exitTree.PutLeaf(tempTx, event.BlockNum, event.BlockPos+uint64(i), types.Leaf{ + Index: currentDepositCount, + Hash: bridge.Hash(), + }) + require.NoError(t, err) + currentDepositCount++ + } + + return newRoot +} + +// encodeLeafDataArrayForTest encodes a slice of LeafData using ABI encoding +func encodeLeafDataArrayForTest(t *testing.T, leaves []LeafData) []byte { + t.Helper() + + encodedBytes, err := aggkitabi.EncodeABIStructArray(leaves) + require.NoError(t, err) + + return encodedBytes +} diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index 0d8f798f5..a53b6a731 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -484,6 +484,8 @@ var ( BlockPos: 4, PreviousDepositCount: big.NewInt(3), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x15cd4b94cacc2cf50d055e1adb5fbfe5cd95485e121a5c411d73e263f2a66685"), + NewRoot: common.HexToHash("0xa03113d9ce128863f29479689c82d0b37ebc9432c569c3a57f22d6c008256c5b"), }}, }, } @@ -5422,6 +5424,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(3), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, }) @@ -5444,6 +5448,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(0), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x283c52c3d10a22d01f95f5bcab5e823675c9855bd40b1e82f32b0437b3b6a446"), }}, }, }) @@ -5466,6 +5472,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(4), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x44e1bf8449ecec2b8b1d123fab00d33c9acb308e590605adf5f6e2de4d1c1133"), }}, }, } @@ -5489,6 +5497,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, } @@ -5514,6 +5524,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(3), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), }}, }, }) @@ -5526,6 +5538,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(4), NewDepositCount: big.NewInt(3), + PreviousRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), + NewRoot: common.HexToHash("0x7533c9ef58edd0bea7959a20c33ed47e5548d35f4ff140c5c915740fe6800fb8"), }}, }, }) @@ -5605,6 +5619,8 @@ func TestProcessor_BackwardLET(t 
*testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, } @@ -5629,6 +5645,8 @@ func TestProcessor_BackwardLET(t *testing.T) { BlockPos: 0, PreviousDepositCount: big.NewInt(5), NewDepositCount: big.NewInt(2), + PreviousRoot: common.HexToHash("0x9ba667158a062be548e5c1b2e8a9a2ad03b693e562535b0723880627c6664b02"), + NewRoot: common.HexToHash("0xa9d31ebbb97c7cd7c7103bee8af7d0b4c83771939baba0b415b0f94c4c39fd84"), }}, }, }
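For reference, the archived-bridge subtests above all assert the same tx-info selection rule: a leaf whose fields match exactly one row in bridge_archive inherits that row's TxHash, TxnSender and FromAddress, while zero or multiple matching rows fall back to the event's tx hash and zero-value addresses. The snippet below is only a sketch of that rule as the tests describe it; the names txInfo and resolveForwardLETTxInfo are hypothetical and do not exist in the repository.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// txInfo groups the three fields the subtests compare (hypothetical helper type).
type txInfo struct {
	TxHash      common.Hash
	TxnSender   common.Address
	FromAddress common.Address
}

// resolveForwardLETTxInfo expresses the rule the subtests verify: exactly one
// matching archived bridge supplies its original transaction details; otherwise
// the ForwardLET event's tx hash is kept and the addresses stay empty.
func resolveForwardLETTxInfo(matches []txInfo, eventTxHash common.Hash) txInfo {
	if len(matches) == 1 {
		return matches[0]
	}
	return txInfo{TxHash: eventTxHash}
}

func main() {
	eventHash := common.HexToHash("0x0a")
	archived := txInfo{
		TxHash:      common.HexToHash("0x0b"),
		TxnSender:   common.HexToAddress("0x1111111111111111111111111111111111111111"),
		FromAddress: common.HexToAddress("0x2222222222222222222222222222222222222222"),
	}

	fmt.Println(resolveForwardLETTxInfo([]txInfo{archived}, eventHash).TxHash)           // archived hash
	fmt.Println(resolveForwardLETTxInfo(nil, eventHash).TxHash)                          // event hash (no match)
	fmt.Println(resolveForwardLETTxInfo([]txInfo{archived, archived}, eventHash).TxHash) // event hash (ambiguous)
}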
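The happy-path subtests also pin down the position accounting: processing a ForwardLET event with n leaves returns event.BlockPos + n, and the i-th new bridge gets deposit count previousDepositCount + i + 1. A minimal sketch of that arithmetic, using the hypothetical helper name forwardLETPositions:

package main

import "fmt"

// forwardLETPositions mirrors the accounting asserted above: the returned block
// position advances by one per leaf, and deposit counts continue from the
// previous deposit count, one per leaf.
func forwardLETPositions(eventBlockPos uint64, previousDepositCount uint32, numLeaves int) (uint64, []uint32) {
	depositCounts := make([]uint32, 0, numLeaves)
	for i := 0; i < numLeaves; i++ {
		depositCounts = append(depositCounts, previousDepositCount+uint32(i)+1)
	}
	return eventBlockPos + uint64(numLeaves), depositCounts
}

func main() {
	// Values from the "process multiple leaves" subtest: BlockPos 10, previous deposit count 9, three leaves.
	newBlockPos, counts := forwardLETPositions(10, 9, 3)
	fmt.Println(newBlockPos, counts) // 13 [10 11 12]
}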