Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 11 additions & 10 deletions db.go
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ func (s *PgStore) InsertBlockBatch(ctx context.Context, blocks []BlockData) erro

func (s *PgStore) StreamBlockNonces(ctx context.Context) (BlockNonceRows, error) {
rows, err := s.pool.Query(ctx,
`SELECT epoch, slot, nonce_value FROM blocks ORDER BY slot`,
`SELECT epoch, slot, nonce_value, block_hash FROM blocks ORDER BY slot`,
)
if err != nil {
return nil, err
Expand Down Expand Up @@ -343,12 +343,13 @@ func (s *PgStore) GetLeaderSchedule(ctx context.Context, epoch int) (*LeaderSche

// pgBlockNonceRows wraps pgx.Rows to implement BlockNonceRows.
type pgBlockNonceRows struct {
rows pgx.Rows
epoch int
slot uint64
nonce []byte
err error
closed bool
rows pgx.Rows
epoch int
slot uint64
nonce []byte
blockHash string
err error
closed bool
}

func (r *pgBlockNonceRows) Next() bool {
Expand All @@ -361,13 +362,13 @@ func (r *pgBlockNonceRows) Next() bool {
return false
}
var slotInt64 int64
r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce)
r.err = r.rows.Scan(&r.epoch, &slotInt64, &r.nonce, &r.blockHash)
r.slot = uint64(slotInt64)
return r.err == nil
}

func (r *pgBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, err error) {
return r.epoch, r.slot, r.nonce, r.err
func (r *pgBlockNonceRows) Scan() (epoch int, slot uint64, nonceValue []byte, blockHash string, err error) {
return r.epoch, r.slot, r.nonce, r.blockHash, r.err
}

func (r *pgBlockNonceRows) Close() {
Expand Down
38 changes: 19 additions & 19 deletions epoch612_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@ import (
"os"
"testing"
"time"

"golang.org/x/crypto/blake2b"
)

func TestEpoch612LeaderSchedule(t *testing.T) {
Expand All @@ -36,9 +34,9 @@ func TestEpoch612LeaderSchedule(t *testing.T) {
ctx := context.Background()

// === Step 1: Compute epoch 612 nonce from chain data ===
// Stream ALL blocks from Shelley genesis, evolving nonce exactly like the bot does.
// Stream ALL blocks from Shelley genesis, evolving nonce via XOR (Nonce semigroup).
// At each epoch's 60% stability window: freeze candidate nonce.
// At each epoch boundary: compute epoch_nonce(e+1) = hash(eta_c(e) || eta_0(e)).
// At each epoch boundary: TICKN rule — η(new) = η_c XOR η_ph

overallStart := time.Now()
nonceStart := time.Now()
Expand All @@ -48,7 +46,9 @@ func TestEpoch612LeaderSchedule(t *testing.T) {
copy(etaV, genesisHash)
eta0 := make([]byte, 32) // epoch nonce — eta_0(208) = shelley genesis hash
copy(eta0, genesisHash)
etaC := make([]byte, 32) // candidate nonce (frozen at stability window)
etaC := make([]byte, 32) // candidate nonce (frozen at stability window)
prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start
var lastBlockHash string

currentEpoch := ShelleyStartEpoch
candidateFrozen := false
Expand All @@ -57,7 +57,7 @@ func TestEpoch612LeaderSchedule(t *testing.T) {
log.Printf("Streaming blocks from Shelley genesis to compute epoch nonces...")

rows, err := store.pool.Query(ctx,
"SELECT epoch, slot, nonce_value FROM blocks WHERE epoch <= 611 ORDER BY slot ASC")
"SELECT epoch, slot, nonce_value, block_hash FROM blocks WHERE epoch <= 611 ORDER BY slot ASC")
if err != nil {
t.Fatalf("Failed to query blocks: %v", err)
}
Expand All @@ -67,29 +67,29 @@ func TestEpoch612LeaderSchedule(t *testing.T) {
var epoch int
var slot uint64
var nonceValue []byte
if err := rows.Scan(&epoch, &slot, &nonceValue); err != nil {
var blockHash string
if err := rows.Scan(&epoch, &slot, &nonceValue, &blockHash); err != nil {
t.Fatalf("Scan failed: %v", err)
}

// Epoch transition
// Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph
if epoch != currentEpoch {
// If we didn't freeze candidate yet (epoch had < 60% blocks), freeze now
if !candidateFrozen {
etaC = make([]byte, 32)
copy(etaC, etaV)
}
// Compute epoch nonce for new epoch: eta_0(e+1) = hash(eta_c(e) || eta_0(e))
h, _ := blake2b.New256(nil)
h.Write(etaC)
h.Write(eta0)
eta0 = h.Sum(nil)
eta0 = xorBytes(etaC, prevHashNonce)
if lastBlockHash != "" {
prevHashNonce, _ = hex.DecodeString(lastBlockHash)
}

currentEpoch = epoch
candidateFrozen = false
}

// Evolve eta_v
// Evolve eta_v via XOR
etaV = evolveNonce(etaV, nonceValue)
lastBlockHash = blockHash
blockCount++

// Freeze candidate at 60% stability window
Expand All @@ -112,10 +112,10 @@ func TestEpoch612LeaderSchedule(t *testing.T) {
etaC = make([]byte, 32)
copy(etaC, etaV)
}
h, _ := blake2b.New256(nil)
h.Write(etaC)
h.Write(eta0)
epoch612Nonce := h.Sum(nil)
if lastBlockHash != "" {
prevHashNonce, _ = hex.DecodeString(lastBlockHash)
}
epoch612Nonce := xorBytes(etaC, prevHashNonce)

nonceElapsed := time.Since(nonceStart)
log.Printf("Nonce computation: %d blocks processed in %v", blockCount, nonceElapsed)
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ go 1.24.0
toolchain go1.24.11

require (
github.com/blinklabs-io/adder v0.37.0
github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1
github.com/blinklabs-io/gouroboros v0.153.1
github.com/cardano-community/koios-go-client/v3 v3.1.3
github.com/cenkalti/backoff/v4 v4.3.0
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bits-and-blooms/bitset v1.24.4 h1:95H15Og1clikBrKr/DuzMXkQzECs1M6hhoGXLwLQOZE=
github.com/bits-and-blooms/bitset v1.24.4/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/blinklabs-io/adder v0.37.0 h1:yfBA+4S34LLgqOcOS5H5XIOvNEhmdypEV7xw+ES/nQ4=
github.com/blinklabs-io/adder v0.37.0/go.mod h1:CJsbQyGJKGgDksRXgHxda5Uco11HSXhYmLqjcxrGFH8=
github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1 h1:C6Y03ERl3OFKknBKSpTnbckbJ1Zr0hJm5sbgfvGu9aU=
github.com/blinklabs-io/adder v0.37.1-0.20260209154719-460d03ed24c1/go.mod h1:a8OjDZFulnrpWAzPZR/htfHzc2gPRL+Lm975fK6Hm4Q=
github.com/blinklabs-io/gouroboros v0.153.1 h1:9Jj4hHFrVmmUbFAg4Jg8p8R07Cimb7rXJL5xrimGOi8=
github.com/blinklabs-io/gouroboros v0.153.1/go.mod h1:MTwq+I/IMtzzWGN2Jd87uuryMfOD+3mQGYNFJn1PSFY=
github.com/blinklabs-io/ouroboros-mock v0.9.0 h1:O4FhgxKt43RcZGcxRQAOV9GMF6F06qtpU76eFeBKWeQ=
Expand Down
3 changes: 3 additions & 0 deletions helm-chart/templates/configmap.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,9 @@ data:
{{- end }}
timezone: {{ .Values.config.leaderlog.timezone | quote }}
timeFormat: {{ .Values.config.leaderlog.timeFormat | default "12h" | quote }}
{{- if .Values.config.leaderlog.ntcQueryTimeout }}
ntcQueryTimeout: {{ .Values.config.leaderlog.ntcQueryTimeout | quote }}
{{- end }}
database:
driver: {{ .Values.config.database.driver | quote }}
{{- if eq .Values.config.database.driver "sqlite" }}
Expand Down
2 changes: 2 additions & 0 deletions helm-chart/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,8 @@ config:
timezone: ""
# Time format: "12h" or "24h"
timeFormat: "12h"
# NtC query timeout for stake snapshot queries (Go duration, e.g. "10m", "30m")
ntcQueryTimeout: "10m"

database:
driver: "sqlite"
Expand Down
9 changes: 7 additions & 2 deletions localquery.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,20 @@ import (
type NodeQueryClient struct {
nodeAddress string
networkMagic uint32
queryTimeout time.Duration
}

// NewNodeQueryClient creates a new node query client.
// nodeAddress can be a TCP address ("host:port"), a UNIX socket path ("/ipc/node.socket"),
// or explicitly prefixed ("unix:///ipc/node.socket", "tcp://host:port").
func NewNodeQueryClient(nodeAddress string, networkMagic int) *NodeQueryClient {
func NewNodeQueryClient(nodeAddress string, networkMagic int, queryTimeout time.Duration) *NodeQueryClient {
if queryTimeout == 0 {
queryTimeout = 10 * time.Minute
}
return &NodeQueryClient{
nodeAddress: nodeAddress,
networkMagic: uint32(networkMagic),
queryTimeout: queryTimeout,
}
}

Expand Down Expand Up @@ -68,7 +73,7 @@ func (c *NodeQueryClient) withQuery(ctx context.Context, fn func(*localstatequer
ouroboros.WithKeepAlive(false),
ouroboros.WithLocalStateQueryConfig(
localstatequery.NewConfig(
localstatequery.WithQueryTimeout(10*time.Minute),
localstatequery.WithQueryTimeout(c.queryTimeout),
),
),
)
Expand Down
5 changes: 3 additions & 2 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -287,8 +287,9 @@ func (i *Indexer) Start() error {
}
if ntcHost != "" {
network, address := parseNodeAddress(ntcHost)
i.nodeQuery = NewNodeQueryClient(ntcHost, i.networkMagic)
log.Printf("Node query client initialized (NtC): %s://%s", network, address)
ntcQueryTimeout := viper.GetDuration("leaderlog.ntcQueryTimeout")
i.nodeQuery = NewNodeQueryClient(ntcHost, i.networkMagic, ntcQueryTimeout)
log.Printf("Node query client initialized (NtC): %s://%s (timeout: %v)", network, address, i.nodeQuery.queryTimeout)
}

// Twitter toggle
Expand Down
62 changes: 38 additions & 24 deletions nonce.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,13 +77,20 @@ func vrfNonceValue(vrfOutput []byte) []byte {
return h.Sum(nil)
}

// xorBytes XORs two 32-byte slices. Implements the Cardano Nonce semigroup:
// Nonce a <> Nonce b = Nonce (Hash.xor a b)
func xorBytes(a, b []byte) []byte {
result := make([]byte, 32)
for i := 0; i < 32; i++ {
result[i] = a[i] ^ b[i]
}
return result
}

// evolveNonce updates the evolving nonce with a new nonce contribution.
// eta_v = BLAKE2b-256(eta_v || nonceValue)
// eta_v = eta_v XOR nonceValue (Cardano Nonce semigroup)
func evolveNonce(currentNonce, nonceValue []byte) []byte {
h, _ := blake2b.New256(nil)
h.Write(currentNonce)
h.Write(nonceValue)
return h.Sum(nil)
return xorBytes(currentNonce, nonceValue)
}

// ProcessBlock processes a block's VRF output for nonce evolution.
Expand Down Expand Up @@ -246,8 +253,10 @@ func (nt *NonceTracker) GetNonceForEpoch(epoch int) ([]byte, error) {
}

// ComputeEpochNonce computes the epoch nonce for targetEpoch entirely from local chain data.
// Streams all blocks from Shelley genesis, evolving the nonce and freezing at the
// stability window (60%) of each epoch, then computing epoch_nonce = hash(eta_c || eta_0).
// Streams all blocks from Shelley genesis, evolving the nonce via XOR and freezing at the
// stability window (60%) of each epoch, then applying the TICKN transition rule:
//
// η(new) = η_c XOR η_ph (candidate nonce XOR previous block hash nonce)
func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int) ([]byte, error) {
shelleyStart := ShelleyStartEpoch
if nt.networkMagic == PreprodNetworkMagic {
Expand All @@ -263,6 +272,8 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int)
eta0 := make([]byte, 32) // eta_0(shelleyStart) = shelley genesis hash
copy(eta0, genesisHash)
etaC := make([]byte, 32)
prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start
var lastBlockHash string

currentEpoch := shelleyStart
candidateFrozen := false
Expand All @@ -274,22 +285,21 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int)
defer rows.Close()

for rows.Next() {
epoch, slot, nonceValue, err := rows.Scan()
epoch, slot, nonceValue, blockHash, err := rows.Scan()
if err != nil {
return nil, fmt.Errorf("scanning block: %w", err)
}

// Epoch transition
// Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph
if epoch != currentEpoch {
if !candidateFrozen {
etaC = make([]byte, 32)
copy(etaC, etaV)
}
// eta_0(new) = hash(eta_c(old) || eta_0(old))
h, _ := blake2b.New256(nil)
h.Write(etaC)
h.Write(eta0)
eta0 = h.Sum(nil)
eta0 = xorBytes(etaC, prevHashNonce)
if lastBlockHash != "" {
prevHashNonce, _ = hex.DecodeString(lastBlockHash)
}
Comment on lines +299 to +302
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Silent discard of hex.DecodeString error could cause a downstream panic in xorBytes.

If lastBlockHash contains invalid hex (e.g., corrupted DB data or odd-length string), hex.DecodeString returns a short/nil slice and the discarded error hides the root cause. The next call to xorBytes(etaC, prevHashNonce) would then panic or produce a wrong nonce.

The same pattern appears at lines 339–341 and in BackfillNonces at lines 394–396.

🛡️ Proposed fix (apply to all three occurrences)
 			if lastBlockHash != "" {
-				prevHashNonce, _ = hex.DecodeString(lastBlockHash)
+				decoded, decErr := hex.DecodeString(lastBlockHash)
+				if decErr != nil || len(decoded) != 32 {
+					return nil, fmt.Errorf("invalid block hash %q for η_ph: %w", lastBlockHash, decErr)
+				}
+				prevHashNonce = decoded
 			}
🤖 Prompt for AI Agents
In `@nonce.go` around lines 299-302, the code silently ignores errors from
hex.DecodeString when decoding lastBlockHash before calling xorBytes, which can
yield a nil or short prevHashNonce and cause panics or wrong nonces. Update each
occurrence (the blocks that set prevHashNonce before the
xorBytes(etaC, prevHashNonce) call sites, and the one inside BackfillNonces) to check the
error returned by hex.DecodeString(lastBlockHash), handle it (return or
propagate an error), and only call xorBytes once prevHashNonce has been successfully
decoded; ensure any error includes context (e.g., the lastBlockHash value or a
descriptive message) so callers can diagnose corrupted or invalid hex data.


// If we just transitioned INTO the target epoch, we have eta_0(target)
if epoch == targetEpoch {
Expand All @@ -304,6 +314,7 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int)

// Evolve eta_v
etaV = evolveNonce(etaV, nonceValue)
lastBlockHash = blockHash

// Freeze candidate at stability window
if !candidateFrozen {
Expand All @@ -325,10 +336,10 @@ func (nt *NonceTracker) ComputeEpochNonce(ctx context.Context, targetEpoch int)
etaC = make([]byte, 32)
copy(etaC, etaV)
}
h, _ := blake2b.New256(nil)
h.Write(etaC)
h.Write(eta0)
result := h.Sum(nil)
if lastBlockHash != "" {
prevHashNonce, _ = hex.DecodeString(lastBlockHash)
}
result := xorBytes(etaC, prevHashNonce)
log.Printf("Computed nonce for epoch %d: %s", targetEpoch, hex.EncodeToString(result))
return result, nil
}
Expand All @@ -349,6 +360,8 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error {
eta0 := make([]byte, 32)
copy(eta0, genesisHash)
etaC := make([]byte, 32)
prevHashNonce := make([]byte, 32) // η_ph — NeutralNonce at Shelley start
var lastBlockHash string

currentEpoch := shelleyStart
candidateFrozen := false
Expand All @@ -366,21 +379,21 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error {
defer rows.Close()

for rows.Next() {
epoch, slot, nonceValue, scanErr := rows.Scan()
epoch, slot, nonceValue, blockHash, scanErr := rows.Scan()
if scanErr != nil {
return fmt.Errorf("scanning block: %w", scanErr)
}

// Epoch transition — compute nonce for the new epoch
// Epoch transition — TICKN rule: η(new) = η_c ⊕ η_ph
if epoch != currentEpoch {
if !candidateFrozen {
etaC = make([]byte, 32)
copy(etaC, etaV)
}
h, _ := blake2b.New256(nil)
h.Write(etaC)
h.Write(eta0)
eta0 = h.Sum(nil)
eta0 = xorBytes(etaC, prevHashNonce)
if lastBlockHash != "" {
prevHashNonce, _ = hex.DecodeString(lastBlockHash)
}

// Cache if not already present
existing, _ := nt.store.GetFinalNonce(ctx, epoch)
Expand All @@ -401,6 +414,7 @@ func (nt *NonceTracker) BackfillNonces(ctx context.Context) error {

// Evolve eta_v
etaV = evolveNonce(etaV, nonceValue)
lastBlockHash = blockHash

// Freeze candidate at stability window
if !candidateFrozen {
Expand Down
Loading