diff --git a/CLAUDE.md b/CLAUDE.md index 35def1f..73823a1 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -91,18 +91,18 @@ Is leader = leaderValue < threshold ``` ### Nonce Evolution -Per block: `nonceValue = BLAKE2b-256(0x4E || vrfOutput)`, then `eta_v = BLAKE2b-256(eta_v || nonceValue)`. Rolling eta_v accumulates across epoch boundaries (no reset). Candidate nonce freezes at 70% epoch progress (stability window). Koios used as fallback when local nonce unavailable. +Per block: `nonceValue = BLAKE2b-256(0x4E || vrfOutput)`, then `eta_v = eta_v XOR nonceValue` (Cardano Nonce semigroup: `Nonce a <> Nonce b = Nonce (xor a b)`). Rolling eta_v accumulates across epoch boundaries (no reset). Candidate nonce (`η_c`) freezes at 60% epoch progress (mainnet: slot 259,200 of the 432,000-slot epoch; the randomness stabilisation window is the final 4k/f = 172,800 slots). At each epoch boundary, the TICKN rule computes: `epochNonce = η_c XOR η_ph` where `η_ph` is the block-hash nonce carried over from the prior epoch boundary. Koios used as fallback in lite mode when local nonce unavailable. **Batch processing:** `ProcessBatch()` method in `nonce.go` performs in-memory nonce evolution for batches of blocks (used during historical sync), then persists the final nonce state in a single DB transaction. This dramatically improves sync performance vs per-block DB writes. ### Trigger Flow -1. Every block: extract VRF output from header, update evolving nonce -2. At 70% epoch progress: freeze candidate nonce +1. Every block: extract VRF output from header, update evolving nonce via XOR +2. At 60% epoch progress (stability window): freeze candidate nonce 3. After freeze: calculate next epoch schedule (mutex-guarded, one goroutine per epoch) 4. Post schedule to Telegram, store in database ### Race Condition Prevention -`checkLeaderlogTrigger` fires on every block after 70% — uses `leaderlogMu` mutex + `leaderlogCalcing` map to ensure only one goroutine runs per epoch. Map entry is cleaned up after goroutine completes. 
+`checkLeaderlogTrigger` fires on every block after 60% — uses `leaderlogMu` mutex + `leaderlogCalcing` map to ensure only one goroutine runs per epoch. Map entry is cleaned up after goroutine completes. ### VRF Extraction by Era @@ -246,7 +246,7 @@ Test files: - `leaderlog_test.go` — SlotToEpoch (all networks), round-trip, formatNumber ## Key Dependencies -- `blinklabs-io/adder` v0.37.0 — live chain tail (must match gouroboros version) +- `blinklabs-io/adder` v0.37.1-pre (commit 460d03e, fixes auto-reconnect channel orphaning) — live chain tail - `blinklabs-io/gouroboros` v0.153.1 — VRF (ECVRF-ED25519-SHA512-Elligator2), NtN ChainSync, NtC LocalStateQuery, ledger types - `modernc.org/sqlite` — pure Go SQLite (no CGO required) - `jackc/pgx/v5` — PostgreSQL driver with COPY protocol support for bulk inserts diff --git a/README.md b/README.md index 2d49590..302e762 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ A Cardano stake pool companion. Block notifications, leader schedule, epoch nonc **Leader Schedule** — Pure Go CPRAOS implementation checking every slot per epoch against your VRF key. Calculates next epoch schedule automatically at the stability window (60% into epoch). On-demand via `/leaderlog`. -**Epoch Nonces** — In full mode, streams every block from Shelley genesis extracting VRF outputs per era, evolving the nonce via BLAKE2b-256, and freezing at the stability window. Backfills ~400 epochs in under 2 minutes. +**Epoch Nonces** — In full mode, streams every block from Shelley genesis extracting VRF outputs per era, evolving the nonce via XOR (Cardano Nonce semigroup), and freezing at the stability window. Backfills ~400 epochs in under 2 minutes. **Stake Queries** — Direct NtC local state query to your cardano-node for mark/set/go stake snapshots. Falls back to Koios if NtC is unavailable. @@ -234,9 +234,9 @@ The epoch nonce is a 32-byte hash serving as randomness for VRF leader election. 
**In full mode**, duckBot self-computes nonces by streaming every block from Shelley genesis: 1. Per block: extract VRF output from header, compute `nonceValue = BLAKE2b-256("N" || vrfOutput)` -2. Evolve: `eta_v = BLAKE2b-256(eta_v || nonceValue)` — rolling accumulation across the epoch -3. At stability window (60% epoch progress): freeze candidate nonce -4. Final nonce: `BLAKE2b-256(candidateNonce || previousEpochNonce)` +2. Evolve: `eta_v = eta_v XOR nonceValue` — Cardano Nonce semigroup (`Nonce a <> Nonce b = Nonce (xor a b)`) +3. At stability window (60% epoch progress): freeze candidate nonce (`η_c`) +4. Epoch transition (TICKN rule): `epochNonce = η_c XOR η_ph` where `η_ph` is the last block hash from the prior epoch boundary **In lite mode**, nonces are fetched from the Koios API. @@ -367,7 +367,7 @@ The adder pipeline handles reconnection at the outer level — duckBot wraps it | Library | Version | Purpose | | ------- | ------- | ------- | | `blinklabs-io/gouroboros` | v0.153.1 | VRF, NtN ChainSync, NtC LocalStateQuery, ledger types | -| `blinklabs-io/adder` | v0.37.0 | Live chain tail pipeline (full block data incl. tx count) | +| `blinklabs-io/adder` | v0.37.1-pre | Live chain tail pipeline (full block data incl. 
tx count) | | `cardano-community/koios-go-client/v3` | | Koios API for stake data and nonce fallback | | `gopkg.in/telebot.v4` | | Telegram bot framework | | `michimani/gotwi` | | Twitter/X API v2 client | diff --git a/db.go b/db.go index 7489226..ea6d50d 100644 --- a/db.go +++ b/db.go @@ -272,7 +272,7 @@ func (s *PgStore) InsertBlockBatch(ctx context.Context, blocks []BlockData) erro func (s *PgStore) StreamBlockNonces(ctx context.Context) (BlockNonceRows, error) { rows, err := s.pool.Query(ctx, - `SELECT epoch, slot, nonce_value FROM blocks ORDER BY slot`, + `SELECT epoch, slot, nonce_value, block_hash FROM blocks ORDER BY slot`, ) if err != nil { return nil, err @@ -343,12 +343,13 @@ func (s *PgStore) GetLeaderSchedule(ctx context.Context, epoch int) (*LeaderSche // pgBlockNonceRows wraps pgx.Rows to implement BlockNonceRows. type pgBlockNonceRows struct { - rows pgx.Rows - epoch int - slot uint64 - nonce []byte - err error - closed bool + rows pgx.Rows + epoch int + slot uint64 + nonce []byte + blockHash string + err error + closed bool } func (r *pgBlockNonceRows) Next() bool { @@ -361,13 +362,13 @@ func (r *pgBlockNonceRows) Next() bool { return false } var slotInt64 int64 - r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce) + r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce, &r.blockHash) r.slot = uint64(slotInt64) return r.err == nil } -func (r *pgBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, err error) { - return r.epoch, r.slot, r.nonce, r.err +func (r *pgBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, blockHash string, err error) { + return r.epoch, r.slot, r.nonce, r.blockHash, r.err } func (r *pgBlockNonceRows) Close() { diff --git a/epoch612_integration_test.go b/epoch612_integration_test.go index 1abbef4..605b66e 100644 --- a/epoch612_integration_test.go +++ b/epoch612_integration_test.go @@ -8,8 +8,6 @@ import ( "os" "testing" "time" - - "golang.org/x/crypto/blake2b" ) func TestEpoch612LeaderSchedule(t 
*testing.T) { @@ -36,9 +34,9 @@ func TestEpoch612LeaderSchedule(t *testing.T) { ctx := context.Background() // === Step 1: Compute epoch 612 nonce from chain data === - // Stream ALL blocks from Shelley genesis, evolving nonce exactly like the bot does. + // Stream ALL blocks from Shelley genesis, evolving nonce via XOR (Nonce semigroup). // At each epoch's 60% stability window: freeze candidate nonce. - // At each epoch boundary: compute epoch_nonce(e+1) = hash(eta_c(e) || eta_0(e)). + // At each epoch boundary: TICKN rule — η(new) = η_c XOR η_ph overallStart := time.Now() nonceStart := time.Now() @@ -48,7 +46,9 @@ func TestEpoch612LeaderSchedule(t *testing.T) { copy(etaV, genesisHash) eta0 := make([]byte, 32) // epoch nonce — eta_0(208) = shelley genesis hash copy(eta0, genesisHash) - etaC := make([]byte, 32) // candidate nonce (frozen at stability window) + etaC := make([]byte, 32) // candidate nonce (frozen at stability window) + prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start + var lastBlockHash string currentEpoch := ShelleyStartEpoch candidateFrozen := false @@ -57,7 +57,7 @@ func TestEpoch612LeaderSchedule(t *testing.T) { log.Printf("Streaming blocks from Shelley genesis to compute epoch nonces...") rows, err := store.pool.Query(ctx, - "SELECT epoch, slot, nonce_value FROM blocks WHERE epoch <= 611 ORDER BY slot ASC") + "SELECT epoch, slot, nonce_value, block_hash FROM blocks WHERE epoch <= 611 ORDER BY slot ASC") if err != nil { t.Fatalf("Failed to query blocks: %v", err) } @@ -67,29 +67,29 @@ func TestEpoch612LeaderSchedule(t *testing.T) { var epoch int var slot uint64 var nonceValue []byte - if err := rows.Scan(&epoch, &slot, &nonceValue); err != nil { + var blockHash string + if err := rows.Scan(&epoch, &slot, &nonceValue, &blockHash); err != nil { t.Fatalf("Scan failed: %v", err) } - // Epoch transition + // Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph if epoch != currentEpoch { - // If we didn't freeze candidate yet 
(epoch had < 60% blocks), freeze now if !candidateFrozen { etaC = make([]byte, 32) copy(etaC, etaV) } - // Compute epoch nonce for new epoch: eta_0(e+1) = hash(eta_c(e) || eta_0(e)) - h, _ := blake2b.New256(nil) - h.Write(etaC) - h.Write(eta0) - eta0 = h.Sum(nil) + eta0 = xorBytes(etaC, prevHashNonce) + if lastBlockHash != "" { + prevHashNonce, _ = hex.DecodeString(lastBlockHash) + } currentEpoch = epoch candidateFrozen = false } - // Evolve eta_v + // Evolve eta_v via XOR etaV = evolveNonce(etaV, nonceValue) + lastBlockHash = blockHash blockCount++ // Freeze candidate at 60% stability window @@ -112,10 +112,10 @@ func TestEpoch612LeaderSchedule(t *testing.T) { etaC = make([]byte, 32) copy(etaC, etaV) } - h, _ := blake2b.New256(nil) - h.Write(etaC) - h.Write(eta0) - epoch612Nonce := h.Sum(nil) + if lastBlockHash != "" { + prevHashNonce, _ = hex.DecodeString(lastBlockHash) + } + epoch612Nonce := xorBytes(etaC, prevHashNonce) nonceElapsed := time.Since(nonceStart) log.Printf("Nonce computation: %d blocks processed in %v", blockCount, nonceElapsed) diff --git a/go.mod b/go.mod index 6bce552..61a5d37 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.24.0 toolchain go1.24.11 require ( - github.com/blinklabs-io/adder v0.37.0 + github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1 github.com/blinklabs-io/gouroboros v0.153.1 github.com/cardano-community/koios-go-client/v3 v3.1.3 github.com/cenkalti/backoff/v4 v4.3.0 diff --git a/go.sum b/go.sum index eb6dabb..fbd95a1 100644 --- a/go.sum +++ b/go.sum @@ -11,8 +11,8 @@ github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bits-and-blooms/bitset v1.24.4 h1:95H15Og1clikBrKr/DuzMXkQzECs1M6hhoGXLwLQOZE= github.com/bits-and-blooms/bitset v1.24.4/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/blinklabs-io/adder v0.37.0 h1:yfBA+4S34LLgqOcOS5H5XIOvNEhmdypEV7xw+ES/nQ4= 
-github.com/blinklabs-io/adder v0.37.0/go.mod h1:CJsbQyGJKGgDksRXgHxda5Uco11HSXhYmLqjcxrGFH8= +github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1 h1:C6Y03ERl3OFKknBKSpTnbckbJ1Zr0hJm5sbgfvGu9aU= +github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1/go.mod h1:a8OjDZFulnrpWAzPZR/htfHzc2gPRL+Lm975fK6Hm4Q= github.com/blinklabs-io/gouroboros v0.153.1 h1:9Jj4hHFrVmmUbFAg4Jg8p8R07Cimb7rXJL5xrimGOi8= github.com/blinklabs-io/gouroboros v0.153.1/go.mod h1:MTwq+I/IMtzzWGN2Jd87uuryMfOD+3mQGYNFJn1PSFY= github.com/blinklabs-io/ouroboros-mock v0.9.0 h1:O4FhgxKt43RcZGcxRQAOV9GMF6F06qtpU76eFeBKWeQ= diff --git a/helm-chart/templates/configmap.yaml b/helm-chart/templates/configmap.yaml index c4e16c1..4dc592f 100644 --- a/helm-chart/templates/configmap.yaml +++ b/helm-chart/templates/configmap.yaml @@ -55,6 +55,9 @@ data: {{- end }} timezone: {{ .Values.config.leaderlog.timezone | quote }} timeFormat: {{ .Values.config.leaderlog.timeFormat | default "12h" | quote }} + {{- if .Values.config.leaderlog.ntcQueryTimeout }} + ntcQueryTimeout: {{ .Values.config.leaderlog.ntcQueryTimeout | quote }} + {{- end }} database: driver: {{ .Values.config.database.driver | quote }} {{- if eq .Values.config.database.driver "sqlite" }} diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml index 1a1393b..3d7637f 100644 --- a/helm-chart/values.yaml +++ b/helm-chart/values.yaml @@ -51,6 +51,8 @@ config: timezone: "" # Time format: "12h" or "24h" timeFormat: "12h" + # NtC query timeout for stake snapshot queries (Go duration, e.g. "10m", "30m") + ntcQueryTimeout: "10m" database: driver: "sqlite" diff --git a/localquery.go b/localquery.go index 6bab56c..78360cd 100644 --- a/localquery.go +++ b/localquery.go @@ -18,15 +18,20 @@ import ( type NodeQueryClient struct { nodeAddress string networkMagic uint32 + queryTimeout time.Duration } // NewNodeQueryClient creates a new node query client. 
// nodeAddress can be a TCP address ("host:port"), a UNIX socket path ("/ipc/node.socket"), // or explicitly prefixed ("unix:///ipc/node.socket", "tcp://host:port"). -func NewNodeQueryClient(nodeAddress string, networkMagic int) *NodeQueryClient { +func NewNodeQueryClient(nodeAddress string, networkMagic int, queryTimeout time.Duration) *NodeQueryClient { + if queryTimeout == 0 { + queryTimeout = 10 * time.Minute + } return &NodeQueryClient{ nodeAddress: nodeAddress, networkMagic: uint32(networkMagic), + queryTimeout: queryTimeout, } } @@ -68,7 +73,7 @@ func (c *NodeQueryClient) withQuery(ctx context.Context, fn func(*localstatequer ouroboros.WithKeepAlive(false), ouroboros.WithLocalStateQueryConfig( localstatequery.NewConfig( - localstatequery.WithQueryTimeout(10*time.Minute), + localstatequery.WithQueryTimeout(c.queryTimeout), ), ), ) diff --git a/main.go b/main.go index 2094b93..0d9d71a 100644 --- a/main.go +++ b/main.go @@ -287,8 +287,9 @@ func (i *Indexer) Start() error { } if ntcHost != "" { network, address := parseNodeAddress(ntcHost) - i.nodeQuery = NewNodeQueryClient(ntcHost, i.networkMagic) - log.Printf("Node query client initialized (NtC): %s://%s", network, address) + ntcQueryTimeout := viper.GetDuration("leaderlog.ntcQueryTimeout") + i.nodeQuery = NewNodeQueryClient(ntcHost, i.networkMagic, ntcQueryTimeout) + log.Printf("Node query client initialized (NtC): %s://%s (timeout: %v)", network, address, i.nodeQuery.queryTimeout) } // Twitter toggle diff --git a/nonce.go b/nonce.go index b59fb7c..e55dc7b 100644 --- a/nonce.go +++ b/nonce.go @@ -77,13 +77,20 @@ func vrfNonceValue(vrfOutput []byte) []byte { return h.Sum(nil) } +// xorBytes XORs two 32-byte slices. 
Implements the Cardano Nonce semigroup: +// Nonce a <> Nonce b = Nonce (Hash.xor a b) +func xorBytes(a, b []byte) []byte { + result := make([]byte, 32) + for i := 0; i < 32; i++ { + result[i] = a[i] ^ b[i] + } + return result +} + // evolveNonce updates the evolving nonce with a new nonce contribution. -// eta_v = BLAKE2b-256(eta_v || nonceValue) +// eta_v = eta_v XOR nonceValue (Cardano Nonce semigroup) func evolveNonce(currentNonce, nonceValue []byte) []byte { - h, _ := blake2b.New256(nil) - h.Write(currentNonce) - h.Write(nonceValue) - return h.Sum(nil) + return xorBytes(currentNonce, nonceValue) } // ProcessBlock processes a block's VRF output for nonce evolution. @@ -246,8 +253,10 @@ func (nt *NonceTracker) GetNonceForEpoch(epoch int) ([]byte, error) { } // ComputeEpochNonce computes the epoch nonce for targetEpoch entirely from local chain data. -// Streams all blocks from Shelley genesis, evolving the nonce and freezing at the -// stability window (60%) of each epoch, then computing epoch_nonce = hash(eta_c || eta_0). 
+// Streams all blocks from Shelley genesis, evolving the nonce via XOR and freezing at the +// stability window (60%) of each epoch, then applying the TICKN transition rule: +// +// η(new) = η_c XOR η_ph (candidate nonce XOR previous block hash nonce) func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) ([]byte, error) { shelleyStart := ShelleyStartEpoch if nt.networkMagic == PreprodNetworkMagic { @@ -263,6 +272,8 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) eta0 := make([]byte, 32) // eta_0(shelleyStart) = shelley genesis hash copy(eta0, genesisHash) etaC := make([]byte, 32) + prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start + var lastBlockHash string currentEpoch := shelleyStart candidateFrozen := false @@ -274,22 +285,21 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) defer rows.Close() for rows.Next() { - epoch, slot, nonceValue, err := rows.Scan() + epoch, slot, nonceValue, blockHash, err := rows.Scan() if err != nil { return nil, fmt.Errorf("scanning block: %w", err) } - // Epoch transition + // Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph if epoch != currentEpoch { if !candidateFrozen { etaC = make([]byte, 32) copy(etaC, etaV) } - // eta_0(new) = hash(eta_c(old) || eta_0(old)) - h, _ := blake2b.New256(nil) - h.Write(etaC) - h.Write(eta0) - eta0 = h.Sum(nil) + eta0 = xorBytes(etaC, prevHashNonce) + if lastBlockHash != "" { + prevHashNonce, _ = hex.DecodeString(lastBlockHash) + } // If we just transitioned INTO the target epoch, we have eta_0(target) if epoch == targetEpoch { @@ -304,6 +314,7 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) // Evolve eta_v etaV = evolveNonce(etaV, nonceValue) + lastBlockHash = blockHash // Freeze candidate at stability window if !candidateFrozen { @@ -325,10 +336,10 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) etaC = 
make([]byte, 32) copy(etaC, etaV) } - h, _ := blake2b.New256(nil) - h.Write(etaC) - h.Write(eta0) - result := h.Sum(nil) + if lastBlockHash != "" { + prevHashNonce, _ = hex.DecodeString(lastBlockHash) + } + result := xorBytes(etaC, prevHashNonce) log.Printf("Computed nonce for epoch %d: %s", targetEpoch, hex.EncodeToString(result)) return result, nil } @@ -349,6 +360,8 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error { eta0 := make([]byte, 32) copy(eta0, genesisHash) etaC := make([]byte, 32) + prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start + var lastBlockHash string currentEpoch := shelleyStart candidateFrozen := false @@ -366,21 +379,21 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error { defer rows.Close() for rows.Next() { - epoch, slot, nonceValue, scanErr := rows.Scan() + epoch, slot, nonceValue, blockHash, scanErr := rows.Scan() if scanErr != nil { return fmt.Errorf("scanning block: %w", scanErr) } - // Epoch transition — compute nonce for the new epoch + // Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph if epoch != currentEpoch { if !candidateFrozen { etaC = make([]byte, 32) copy(etaC, etaV) } - h, _ := blake2b.New256(nil) - h.Write(etaC) - h.Write(eta0) - eta0 = h.Sum(nil) + eta0 = xorBytes(etaC, prevHashNonce) + if lastBlockHash != "" { + prevHashNonce, _ = hex.DecodeString(lastBlockHash) + } // Cache if not already present existing, _ := nt.store.GetFinalNonce(ctx, epoch) @@ -401,6 +414,7 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error { // Evolve eta_v etaV = evolveNonce(etaV, nonceValue) + lastBlockHash = blockHash // Freeze candidate at stability window if !candidateFrozen { diff --git a/nonce_test.go b/nonce_test.go index 0aa509b..49b0705 100644 --- a/nonce_test.go +++ b/nonce_test.go @@ -33,7 +33,7 @@ func TestVrfNonceValue(t *testing.T) { } func TestEvolveNonce(t *testing.T) { - // evolveNonce = BLAKE2b-256(currentNonce || nonceValue) + // evolveNonce = XOR 
(Cardano Nonce semigroup) currentNonce := make([]byte, 32) currentNonce[0] = 0xAA nonceValue := make([]byte, 32) @@ -41,15 +41,37 @@ func TestEvolveNonce(t *testing.T) { result := evolveNonce(currentNonce, nonceValue) - // Verify manually - h, _ := blake2b.New256(nil) - h.Write(currentNonce) - h.Write(nonceValue) - expected := h.Sum(nil) + // XOR: 0xAA ^ 0xBB = 0x11, rest are 0x00 ^ 0x00 = 0x00 + if result[0] != 0x11 { + t.Fatalf("evolveNonce byte[0] mismatch: got 0x%x, want 0x11", result[0]) + } + for i := 1; i < 32; i++ { + if result[i] != 0 { + t.Fatalf("evolveNonce byte[%d] mismatch: got 0x%x, want 0x00", i, result[i]) + } + } +} - if hex.EncodeToString(result) != hex.EncodeToString(expected) { - t.Fatalf("evolveNonce mismatch:\n got: %s\n want: %s", - hex.EncodeToString(result), hex.EncodeToString(expected)) +func TestXorBytes(t *testing.T) { + a := make([]byte, 32) + b := make([]byte, 32) + for i := range a { + a[i] = byte(i) + b[i] = byte(0xFF - i) + } + result := xorBytes(a, b) + for i := range result { + expected := byte(i) ^ byte(0xFF-i) + if result[i] != expected { + t.Fatalf("xorBytes byte[%d]: got 0x%x, want 0x%x", i, result[i], expected) + } + } + + // XOR with zeros is identity + zeros := make([]byte, 32) + result = xorBytes(a, zeros) + if hex.EncodeToString(result) != hex.EncodeToString(a) { + t.Fatalf("xorBytes with zeros should be identity") } } diff --git a/store.go b/store.go index e92cd35..3d2b3f7 100644 --- a/store.go +++ b/store.go @@ -22,7 +22,7 @@ type BlockData struct { // BlockNonceRows is an iterator over blocks for nonce computation. 
type BlockNonceRows interface { Next() bool - Scan() (epoch int, slot uint64, nonceValue []byte, err error) + Scan() (epoch int, slot uint64, nonceValue []byte, blockHash string, err error) Close() Err() error } @@ -332,7 +332,7 @@ func (s *SqliteStore) InsertBlockBatch(ctx context.Context, blocks []BlockData) func (s *SqliteStore) StreamBlockNonces(ctx context.Context) (BlockNonceRows, error) { rows, err := s.db.QueryContext(ctx, - `SELECT epoch, slot, nonce_value FROM blocks ORDER BY slot`, + `SELECT epoch, slot, nonce_value, block_hash FROM blocks ORDER BY slot`, ) if err != nil { return nil, err @@ -410,12 +410,13 @@ func (s *SqliteStore) GetLeaderSchedule(ctx context.Context, epoch int) (*Leader // sqliteBlockNonceRows wraps sql.Rows to implement BlockNonceRows. type sqliteBlockNonceRows struct { - rows *sql.Rows - epoch int - slot uint64 - nonce []byte - err error - closed bool + rows *sql.Rows + epoch int + slot uint64 + nonce []byte + blockHash string + err error + closed bool } func (r *sqliteBlockNonceRows) Next() bool { @@ -428,13 +429,13 @@ func (r *sqliteBlockNonceRows) Next() bool { return false } var slotInt64 int64 - r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce) + r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce, &r.blockHash) r.slot = uint64(slotInt64) return r.err == nil } -func (r *sqliteBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, err error) { - return r.epoch, r.slot, r.nonce, r.err +func (r *sqliteBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, blockHash string, err error) { + return r.epoch, r.slot, r.nonce, r.blockHash, r.err } func (r *sqliteBlockNonceRows) Close() {