diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cc6d6c9d5..5c026af21 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -432,7 +432,7 @@ jobs: - name: Run Simnet Tests run: | cargo build -p hyperbridge --release - ./target/release/hyperbridge simnode --chain=gargantua-2000 --name=alice --tmp --state-pruning=archive --blocks-pruning=archive --rpc-port=9990 --port 40337 --log="mmr=trace" --rpc-cors=all --unsafe-rpc-external --rpc-methods=unsafe --pool-type=single-state & + ./target/release/hyperbridge simnode --chain=gargantua-4009 --name=alice --tmp --state-pruning=archive --blocks-pruning=archive --rpc-port=9990 --port 40337 --log="mmr=trace" --rpc-cors=all --unsafe-rpc-external --rpc-methods=unsafe --pool-type=single-state & ./scripts/wait_for_tcp_port_opening.sh localhost 9990 cargo test -p simtests -- --nocapture --ignored --test-threads=1 --skip test_runtime_upgrade_and_fee_migration --skip legacy_storage_items_state_drain_test diff --git a/Cargo.lock b/Cargo.lock index 3e008b622..985e23a31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24186,7 +24186,10 @@ checksum = "620a1d43d70e142b1d46a929af51d44f383db9c7a2ec122de2cd992ccfcf3c18" name = "simtests" version = "0.1.1" dependencies = [ + "alloy-sol-types 1.5.7", "anyhow", + "beefy-prover", + "beefy-verifier-primitives", "ckb-merkle-mountain-range", "crypto-utils", "futures", @@ -24197,6 +24200,7 @@ dependencies = [ "indicatif 0.17.11", "ismp", "ismp-parachain", + "ismp-solidity-abi", "jsonrpsee-core 0.24.10", "mmr-primitives", "nexus-runtime", diff --git a/modules/consensus/beefy/verifier/src/error.rs b/modules/consensus/beefy/verifier/src/error.rs index 0a817eb43..2536c6c8d 100644 --- a/modules/consensus/beefy/verifier/src/error.rs +++ b/modules/consensus/beefy/verifier/src/error.rs @@ -1,28 +1,56 @@ +//! Error types raised by the BEEFY verifier and its SP1 sibling. 
+ use alloc::string::String; use thiserror::Error; +/// All failures the BEEFY proof verifiers can raise. Dispatchers may match on +/// [`Error::StaleHeight`] to distinguish a benign uncle attempt from a hard verification +/// failure; everything else is opaque and indicates the proof itself is invalid. #[derive(Error, Debug)] pub enum Error { + /// `trusted_state.latest_beefy_height >= proof.block_number`. Surfaced before any + /// cryptographic work, so it's cheap to recognise and re-route as an uncle. #[error("Stale height: trusted height {trusted_height} >= current_height {current_height}")] - StaleHeight { trusted_height: u32, current_height: u32 }, + StaleHeight { + /// Trusted state height at verification time. + trusted_height: u32, + /// Block number reported by the proof. + current_height: u32, + }, + /// Fewer than the BEEFY supermajority threshold of authorities signed the commitment. #[error("Super majority of signatures required")] SuperMajorityRequired, + /// The commitment was signed by an authority set the verifier does not know about. #[error("Unkown authority set id {id}")] - UnknownAuthoritySet { id: u64 }, + UnknownAuthoritySet { + /// Unknown authority set id from the commitment. + id: u64, + }, + /// The signed commitment payload is missing its MMR root hash entry. #[error("MMR root hash is missing from commitment payload")] MmrRootHashMissing, + /// The MMR root hash entry is the wrong length (expected 32 bytes). #[error("Invalid MMR root hash length: expected 32, found {len}")] - InvalidMmrRootHashLength { len: usize }, + InvalidMmrRootHashLength { + /// Actual length found. + len: usize, + }, + /// `secp256k1` ecrecover did not return a public key for one of the signatures. #[error("Failed to recover public key from signature")] FailedToRecoverPublicKey, + /// The merkle multi-proof of the signing authorities does not verify. 
#[error("Invalid authorities proof")] InvalidAuthoritiesProof, + /// MMR-leaf-vs-root verification raised an internal error. #[error("MMR verification failed during calculation: {0}")] MmrVerificationFailed(String), + /// MMR-leaf-vs-root verification ran but the calculated root differs from the proven root. #[error("Invalid MMR proof: calculated root does not match provided root")] InvalidMmrProof, + /// The merkle proof of parachain headers does not verify. #[error("Invalid parachain header proof: merkle proof verification failed")] InvalidParachainProof, + /// The SP1 Groth16 verifier rejected the proof bytes. #[error("SP1 proof verification failed")] Sp1VerificationFailed, } diff --git a/modules/consensus/beefy/verifier/src/lib.rs b/modules/consensus/beefy/verifier/src/lib.rs index d0209e2aa..bd9e13d1b 100644 --- a/modules/consensus/beefy/verifier/src/lib.rs +++ b/modules/consensus/beefy/verifier/src/lib.rs @@ -23,7 +23,7 @@ extern crate alloc; -mod error; +pub mod error; pub mod sp1; #[cfg(test)] mod test; diff --git a/modules/ismp/clients/beefy/src/consensus.rs b/modules/ismp/clients/beefy/src/consensus.rs index 77b9e2794..4dd350c2f 100644 --- a/modules/ismp/clients/beefy/src/consensus.rs +++ b/modules/ismp/clients/beefy/src/consensus.rs @@ -90,9 +90,8 @@ where PROOF_TYPE_NAIVE => { let consensus_proof: ConsensusMessage = codec::Decode::decode(&mut &payload[..]) .map_err(|e| Error::Custom(format!("Cannot decode naive proof: {e:?}")))?; - verify_consensus::(consensus_state, consensus_proof).map_err( - |e| Error::Custom(format!("Error verifying naive consensus update: {e:?}")), - )? + verify_consensus::(consensus_state, consensus_proof) + .map_err(|e| Error::AnyHow(e.into()))? }, PROOF_TYPE_SP1 => { let sp1_proof: Sp1BeefyProof = codec::Decode::decode(&mut &payload[..]) @@ -105,9 +104,7 @@ where sp1_proof, vkey_hash, ) - .map_err(|e| { - Error::Custom(format!("Error verifying SP1 consensus update: {e:?}")) - })? 
+ .map_err(|e| Error::AnyHow(anyhow::Error::new(e).into()))? }, _ => return Err(Error::Custom(format!("Unknown proof type: {proof_type}"))), }; diff --git a/modules/pallets/beefy-consensus-proofs/src/benchmarking.rs b/modules/pallets/beefy-consensus-proofs/src/benchmarking.rs index 4def60745..72925156b 100644 --- a/modules/pallets/beefy-consensus-proofs/src/benchmarking.rs +++ b/modules/pallets/beefy-consensus-proofs/src/benchmarking.rs @@ -16,73 +16,90 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use codec::Encode; use frame_benchmarking::v2::*; use frame_system::RawOrigin; use polkadot_sdk::*; -use sp_core::crypto::KeyTypeId; - -/// SCALE-encoded `beefy_verifier_primitives::ConsensusState` and wire-format proof -/// (`[PROOF_TYPE_SP1] ++ SCALE(Sp1BeefyProof)`) for the SP1 Groth16 fixture used in -/// `evm/test/SP1BeefyTest.sol::testVerifySp1Optional`. Produced by the ignored helper -/// `beefy_verifier::test::dump_sp1_fixture_scale_bytes`. -/// -/// The fixture's original `next_authorities.id` is 0x1276 (same as the leaf's -/// `beefy_next_authority_set.id`), which would keep `current_authorities.id` unchanged -/// across the update — i.e. not a rotation, a messaging-only update. We rewrite -/// `next_authorities.id` to 0x1275 so the update is a rotation (new_current = -/// prev_next > prev_current). SP1 public inputs only commit to -/// `authority.keyset_commitment` and `authority.len`, not `id`, so the proof still -/// verifies. 
-const TRUSTED_STATE_SCALE: [u8; 128] = hex_literal::hex!("2279d60118532a010000000000000000000000000000000000000000000000000000000000000000751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49"); - -const WIRE_PROOF: [u8; 808] = hex_literal::hex!("012a79d6017512000000000000002979d601e1dbc67b9da4b90227fb3dc2e7ffdce4e120d583502399e4bd083c02651ca5eb761200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f4963bc2eb07f9c83afe64eb8815b626cd0a7d2a1bbb4630a44a1896af297d0135d04e504739e9bd7f1addf87db9b6a762bd0e1713baa895c3b82b4595080e5ba02fb5b3cf2915702b49122c32b822e6a11384074d8902d5ea5f79c7cb0d7804e49501b8b532298f49e38d3f7140ce1ba61c243152e4e380b37eb628e08d5270d8b2c5e4ebedd84bb14066175726120fbc4d208000000000452505352902a869d4e00b3bb93f1e88e41a2b5f51fc637626b4ce1da15749ef2d79de4797a9ae459070449534d50010118a13886ac93d163a1d22cdef94e018eba5189424a66b7bd03a5ac232beb46bf08b0f9d2b979fff833d7e21a64a5183c61e2630c0b452236baba3c1b4ff41821044953544d20ca3be169000000000561757261010152d45dea4dcf058b0610e12981e0e4c97ad153f26481510c0b78beedf1848b4dd2abd37b8c6b800b72fa12199898eca7651471b49e38d6167a84fb6e2df7c78400000000270d000091054388a21c0000000000000000000000000000000000000000000000000000000000000000002f850ee998974d6cc00e50cd0814b098c05bfade466d28573240d057f2535200000000000000000000000000000000000000000000000000000000000000002ac5e596c552ee76353c176f0870e47a0aa765ceafc4c65b03dbf434e27fa9062f185bdc40f7aae982c1c8c6b766dd491a1e1cd60128efbc58da965e5be96320287f4ce1b04538f0c8287c8eff096c36df67dc17970032546c9b3d4dd5510c5c25e880e13469e1e1aca1b41c367f2ecf04da65f7602fb53ec212b03d0148157b2cd9a79a9779f350d240e6d4c980848302fca8c7447c5fa7ac8d3c6eefcd0c640acff8b27ea316db978652553e3d054765094cf0dab6085a616489cdb973c42b258e22f346ac3ceb3e2e6750c37dad1f98f6ca15d1f70659343caa52dbbcad150b75dd2dcf0ba0a664ea4605b291df54ab1aa5b4c55034b9425ba29cc87eca7b"); +use 
sp_core::Get; + +/// SCALE-encoded `beefy_verifier_primitives::ConsensusState` for the SP1 Groth16 fixture +/// used in `evm/tests/foundry/SP1BeefyTest.sol::testVerifySp1Optional`. The first 4 bytes +/// (`latest_beefy_height` LE) decode to 30_832_930 = 0x01d67922, which is below the +/// fixture proof's `blockNumber = 0x01d6792a`. Used as the pre-proof snapshot in +/// `ProofContext` so `settle_uncle_proof`'s SP1 verifier sees a valid trusted state. +const TRUSTED_STATE_SCALE: [u8; 128] = hex_literal::hex!("2279d60118532a010000000000000000000000000000000000000000000000000000000000000000751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49761200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49"); + +/// Same fixture as `TRUSTED_STATE_SCALE` but with `latest_beefy_height` bumped to +/// 30_832_938 = 0x01d6792a (first byte `22` → `2a`), which equals the fixture proof's +/// `blockNumber`. Used as the live consensus state so the SP1 verifier inside +/// `BeefyConsensusClient::verify_consensus` returns `StaleHeight` cheaply (its own +/// upfront check, before any cryptographic work). The pallet maps that to `StaleProof`, +/// dispatch routes to `settle_uncle_proof`, and SP1 runs once there. Net cost on the +/// measured path: one SP1 verification + uncle storage writes. +const LIVE_STATE_SCALE: [u8; 128] = hex_literal::hex!("2a79d60118532a010000000000000000000000000000000000000000000000000000000000000000751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49761200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49"); + +/// Wire-format proof: `[PROOF_TYPE_SP1] ++ abi.encode(SP1BeefyProof)` (without the outer +/// struct offset, matching what `::abi_decode_params` accepts). +/// ABI bytes lifted verbatim from `SP1BeefyTest.sol::testVerifySp1Optional`. 
+const WIRE_PROOF: [u8; 1249] = hex_literal::hex!("010000000000000000000000000000000000000000000000000000000001d6792a000000000000000000000000000000000000000000000000000000000000127500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001d67929e1dbc67b9da4b90227fb3dc2e7ffdce4e120d583502399e4bd083c02651ca5eb00000000000000000000000000000000000000000000000000000000000012760000000000000000000000000000000000000000000000000000000000000257a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f4963bc2eb07f9c83afe64eb8815b626cd0a7d2a1bbb4630a44a1896af297d0135d00000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000d2700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000139739e9bd7f1addf87db9b6a762bd0e1713baa895c3b82b4595080e5ba02fb5b3cf2915702b49122c32b822e6a11384074d8902d5ea5f79c7cb0d7804e49501b8b532298f49e38d3f7140ce1ba61c243152e4e380b37eb628e08d5270d8b2c5e4ebedd84bb14066175726120fbc4d208000000000452505352902a869d4e00b3bb93f1e88e41a2b5f51fc637626b4ce1da15749ef2d79de4797a9ae459070449534d50010118a13886ac93d163a1d22cdef94e018eba5189424a66b7bd03a5ac232beb46bf08b0f9d2b979fff833d7e21a64a5183c61e2630c0b452236baba3c1b4ff41821044953544d20ca3be169000000000561757261010152d45dea4dcf058b0610e12981e0e4c97ad153f26481510c0b78beedf1848b4dd2abd37b8c6b800b72fa12199898eca7651471b49e38d6167a84fb6e2df7c7840000000000000000000000000000000000000000000000000000000000000000000000000001644388a21c0000000000000000000000000000000000000000000000000000000000000000002f850ee998974d6cc00e50cd0814b098c05bfade466d28573240d057f2535200000000000000000000000000000000000000000000000000000000000000002ac5e596c552ee76353
c176f0870e47a0aa765ceafc4c65b03dbf434e27fa9062f185bdc40f7aae982c1c8c6b766dd491a1e1cd60128efbc58da965e5be96320287f4ce1b04538f0c8287c8eff096c36df67dc17970032546c9b3d4dd5510c5c25e880e13469e1e1aca1b41c367f2ecf04da65f7602fb53ec212b03d0148157b2cd9a79a9779f350d240e6d4c980848302fca8c7447c5fa7ac8d3c6eefcd0c640acff8b27ea316db978652553e3d054765094cf0dab6085a616489cdb973c42b258e22f346ac3ceb3e2e6750c37dad1f98f6ca15d1f70659343caa52dbbcad150b75dd2dcf0ba0a664ea4605b291df54ab1aa5b4c55034b9425ba29cc87eca7b00000000000000000000000000000000000000000000000000000000"); const FIXTURE_VKEY: &[u8] = b"0x0059fd0bff44da77999bb7974cbcf2ac7dc89e5869352f20a2f3cd46c9f53d5c"; -/// Benchmark-only key type id for the submitter keypair. -const BENCH_KEY: KeyTypeId = KeyTypeId(*b"bnch"); - #[benchmarks( where - T::AccountId: From<[u8; 32]> + Into<[u8; 32]>, + T::AccountId: From<[u8; 32]>, <::Currency as frame_support::traits::fungible::Inspect>::Balance: From, )] mod benchmarks { use super::*; + /// Benches the uncle path of `submit_proof` along the single-SP1 worst case. Setup + /// seeds the live consensus state with `latest_beefy_height` equal to the fixture + /// proof's `blockNumber`. When dispatch reaches `BeefyConsensusClient::verify_consensus`, + /// the inner SP1 verifier's own stale check (`beefy_verifier::error::Error::StaleHeight`) + /// returns immediately — before any cryptographic work — and the pallet maps that to + /// `StaleProof`. Dispatch then routes to `settle_uncle_proof`, which runs + /// `verify_sp1_consensus` exactly once against the pre-seeded snapshot in + /// `ProofContext`. The resulting weight covers one SP1 verification plus uncle storage + /// writes — also the right bound for the first-proof path, which runs SP1 once inside + /// `verify_and_apply`. #[benchmark] fn submit_proof() { - // Seed the consensus state and SP1 vkey the verifier will read. 
- pallet_ismp::ConsensusStates::::insert( - pallet::BEEFY_CONSENSUS_ID, - TRUSTED_STATE_SCALE.to_vec(), - ); + // Live consensus state is "ahead" of the proof so the verifier's own stale check + // exits before running SP1. `create_consensus_client` also writes + // `ConsensusStateClient`, `UnbondingPeriod`, and `ConsensusClientUpdateTime` so + // the BEEFY client is fully wired up. + pallet_ismp::Pallet::::create_consensus_client( + frame_system::RawOrigin::Root.into(), + ismp::messaging::CreateConsensusState { + consensus_state: LIVE_STATE_SCALE.to_vec(), + consensus_client_id: ismp_beefy::BEEFY_CONSENSUS_ID, + consensus_state_id: ismp_beefy::BEEFY_CONSENSUS_ID, + unbonding_period: T::UnbondingPeriod::get(), + challenge_periods: Default::default(), + state_machine_commitments: Default::default(), + }, + ) + .expect("create_consensus_client succeeds in benchmark setup"); pallet::Sp1VkeyHash::::put(FIXTURE_VKEY.to_vec()); - // Generate a deterministic SR25519 keypair via the benchmark keystore. - // `sr25519_generate` stores the keypair keyed by (BENCH_KEY, public) so - // `sr25519_sign` can look it up. The pubkey bytes double as the AccountId. - let public = sp_io::crypto::sr25519_generate(BENCH_KEY, None); - let submitter: T::AccountId = public.0.into(); + // Pre-seed the uncle snapshot at `Self::latest_height()` (0 with no parachain + // commitments stored). The snapshot's `latest_beefy_height` is below the proof's + // `blockNumber` so the SP1 verifier accepts the proof here. + pallet::ProofContext::::insert(0u64, TRUSTED_STATE_SCALE.to_vec()); - // Sign the canonical message exactly as `verify_and_apply` computes it. 
- let proof = WIRE_PROOF.to_vec(); - let proof_digest = sp_io::hashing::keccak_256(&proof); - let msg_preimage = (crate::types::SIGNATURE_DOMAIN, &submitter, proof_digest).encode(); - let signed_msg = sp_io::hashing::keccak_256(&msg_preimage); - let signature = sp_io::crypto::sr25519_sign(BENCH_KEY, &public, &signed_msg) - .expect("keystore has the just-generated keypair"); + // Any 32-byte AccountId works. The signed origin doesn't need a keystore entry + // for the actual signature, just a usable AccountId for reward payout. + let submitter: T::AccountId = [1u8; 32].into(); - let payload = crate::types::SubmitProofPayload { submitter, proof }; + let proof = + frame_support::BoundedVec::::truncate_from(WIRE_PROOF.to_vec()); #[extrinsic_call] - _(RawOrigin::None, payload, signature); + _(RawOrigin::Signed(submitter), proof); - // Fixture rewrites `next_authorities.id` to force the rotation path, so a - // single `RotationProofs` entry is recorded and `MessagingProofs` stays empty. - assert_eq!(pallet::RotationProofs::::get().len(), 1); - assert_eq!(pallet::MessagingProofs::::get().len(), 0); + // Uncle accepted at position 0; one hash recorded under height 0. + assert_eq!(pallet::ProverCount::::get(0u64), 1); + assert_eq!(pallet::AcceptedProofHashes::::get(0u64).len(), 1); } #[benchmark] @@ -105,6 +122,26 @@ mod benchmarks { assert_eq!(pallet::Sp1VkeyHash::::get(), vkey); } + #[benchmark] + fn set_reward_curve() { + // Suggested mainnet defaults: 100%, 80%, 60%, 40%, 20%. The curve is bounded by + // `MaxStoredProvers` (`MaxUncleProvers + 1`), covering position 0 plus every + // uncle slot. 
+ let curve: frame_support::BoundedVec<(u32, u32), pallet::MaxStoredProvers> = + frame_support::BoundedVec::truncate_from(alloc::vec![ + (1u32, 1u32), + (4, 5), + (3, 5), + (2, 5), + (1, 5), + ]); + + #[extrinsic_call] + _(RawOrigin::Root, curve.clone()); + + assert_eq!(pallet::RewardCurve::::get(), curve); + } + // NOTE: `initialize_state` still has no benchmark because it requires an ABI-encoded // solidity `BeefyConsensusState` fixture. Add alongside the SP1 fixture once we need // its weight to be accurate. diff --git a/modules/pallets/beefy-consensus-proofs/src/lib.rs b/modules/pallets/beefy-consensus-proofs/src/lib.rs index f9013d117..0c9bbb8f1 100644 --- a/modules/pallets/beefy-consensus-proofs/src/lib.rs +++ b/modules/pallets/beefy-consensus-proofs/src/lib.rs @@ -16,18 +16,29 @@ //! # Pallet BEEFY Consensus Proofs //! //! Verifies BEEFY consensus proofs (primarily SP1 ZK) submitted by off-chain provers and -//! feeds the finalized parachain state commitments into `pallet-ismp`. Rewards submitters a -//! fixed amount from the treasury when a proof does useful work — either carries the -//! expected next authority-set rotation, or advances the latest proven parachain height -//! past a block in which new ISMP requests were dispatched. +//! feeds the finalized parachain state commitments into `pallet-ismp`. Rewards submitters +//! from the treasury when a proof does useful work — either carries the expected next +//! authority-set rotation, or advances the latest proven parachain height past a block +//! in which new ISMP requests were dispatched. //! -//! Proofs are submitted via **authenticated unsigned** extrinsics: the payload carries an -//! SR25519 signature over `(domain, submitter, keccak256(proof))`. The submitter account -//! is both the reward payee and the claimed signer. Full proof verification runs in -//! `ValidateUnsigned` so the tx pool only ever retains valid proofs. Replay is prevented -//! 
by the monotonic advance of `pallet-ismp::LatestStateMachineHeight` and the BEEFY authority set -//! id (tracked in `pallet-ismp`'s consensus state): resubmitting -//! the same bytes after a proof is applied trips `StaleProof` or `UnexpectedAuthoritySet`. +//! Proofs are submitted via **signed** extrinsics: the signer of the extrinsic is the +//! reward payee. The pallet sets `Pays::No` on accepted proofs so a successful prover +//! gets their fee refunded along with the reward; failed proofs pay the transaction +//! fee normally, which keeps spam off the chain. +//! +//! ## Uncle proofs +//! +//! Multiple SP1 provers running independently can each get rewarded for the same +//! finality target via decreasing-curve uncle rewards. The first prover to land a +//! proof advances state and gets position 0; subsequent independent provers +//! (different proof bytes thanks to SP1 Groth16 witness randomization) for the +//! same target are accepted as uncles, up to `MaxUncleProvers`, and rewarded at +//! decreasing positions. +//! +//! Uncle verification reuses the consensus state snapshot taken before the first +//! proof mutated it, so uncle proofs are checked cryptographically against the +//! same trusted state the first prover used. `keccak256(proof)` is recorded per +//! parachain height to reject re-submission of bytes that were already accepted. 
#![cfg_attr(not(feature = "std"), no_std)] @@ -41,7 +52,6 @@ pub mod weights; use polkadot_sdk::*; pub use pallet::*; -pub use types::{Signature, SubmitProofPayload}; pub use weights::WeightInfo; #[frame_support::pallet] @@ -51,6 +61,7 @@ pub mod pallet { use alloy_sol_types::SolType; use codec::{Decode, Encode}; use frame_support::{ + dispatch::{DispatchResultWithPostInfo, Pays, PostDispatchInfo}, pallet_prelude::*, traits::{ fungible::{self, Inspect, Mutate}, @@ -60,27 +71,27 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; use ismp::{ - consensus::{ConsensusClientId, ConsensusStateId, StateMachineHeight, StateMachineId}, - events::StateMachineUpdated, + consensus::{ConsensusStateId, StateMachineHeight, StateMachineId}, handlers, host::IsmpHost, messaging::{ConsensusMessage as IsmpConsensusMessage, Message}, }; use ismp_solidity_abi::beefy::BeefyConsensusState as SolBeefyConsensusState; use primitive_types::H256; - use sp_core::sr25519; - use sp_runtime::{ - traits::AccountIdConversion, - transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity, - TransactionValidityError, ValidTransaction, - }, - }; - - use crate::types::{Signature, SubmitProofPayload}; - - /// Longevity for proofs in the tx pool, in blocks. - const PROOF_LONGEVITY: TransactionLongevity = 15; + use sp_runtime::traits::AccountIdConversion; + + type BalanceOf = + <::Currency as Inspect<::AccountId>>::Balance; + + /// `Get` adapter that yields `MaxUncleProvers + 1`, the total number of provers + /// (one first + `MaxUncleProvers` uncles) that may be rewarded per parachain height. + /// Used as the bound for `AcceptedProofHashes` and `RewardCurve`. 
+ pub struct MaxStoredProvers(core::marker::PhantomData); + impl Get for MaxStoredProvers { + fn get() -> u32 { + T::MaxUncleProvers::get().saturating_add(1) + } + } #[pallet::pallet] #[pallet::without_storage_info] @@ -89,7 +100,7 @@ pub mod pallet { #[pallet::config] pub trait Config: polkadot_sdk::frame_system::Config + pallet_ismp::Config { /// Origin permitted to run privileged calls (`initialize_state`, `set_proof_reward`, - /// `set_sp1_vkey_hash`). + /// `set_sp1_vkey_hash`, `set_reward_curve`). type AdminOrigin: EnsureOrigin; /// Currency used for treasury reward payouts. @@ -99,7 +110,7 @@ pub mod pallet { #[pallet::constant] type TreasuryPalletId: Get; - /// Maximum SCALE-encoded size of a single `SubmitProofPayload`. + /// Maximum size in bytes of a single proof payload. #[pallet::constant] type MaxProofSize: Get; @@ -123,12 +134,16 @@ pub mod pallet { #[pallet::constant] type AllowedProofTypes: Get<&'static [u8]>; + /// Maximum number of uncle provers rewarded per parachain height, in addition to + /// the first prover. Total provers per height are therefore `MaxUncleProvers + 1`, + /// occupying positions `0..=MaxUncleProvers` (position 0 is always the first prover). + /// Naive proofs only ever occupy position 0; uncle rewards apply to SP1. + #[pallet::constant] + type MaxUncleProvers: Get; + /// The pallet-assets instance used for managing the reputation token. /// Mints reputation tokens 1:1 with native token rewards to proof submitters. - type ReputationAsset: fungible::Mutate< - Self::AccountId, - Balance = <::Currency as Inspect>::Balance, - >; + type ReputationAsset: fungible::Mutate>; /// Weight info. type WeightInfo: crate::weights::WeightInfo; @@ -139,10 +154,10 @@ pub mod pallet { #[pallet::storage] pub type LastRewardedDispatchRoot = StorageValue<_, H256, OptionQuery>; - /// Fixed reward amount per eligible proof. + /// Base reward amount paid to position-0 (first) provers. 
Uncle rewards are derived from + /// this value by applying [`RewardCurve`]. #[pallet::storage] - pub type ProofReward = - StorageValue<_, <::Currency as Inspect>::Balance, ValueQuery>; + pub type ProofReward = StorageValue<_, BalanceOf, ValueQuery>; /// SP1 verification key hash (ASCII hex), consumed by /// `beefy_verifier::sp1::verify_sp1_consensus`. @@ -167,16 +182,39 @@ pub mod pallet { pub type RotationProofs = StorageValue<_, BoundedBTreeMap, ValueQuery>; + /// Pre-proof BEEFY consensus state snapshot keyed by the parachain height the + /// first accepted proof advanced to. Uncle proofs verify against this snapshot + /// because the live consensus state has already been advanced by the first proof. + /// Pruned alongside `MessagingProofs`/`RotationProofs` eviction. + #[pallet::storage] + pub type ProofContext = StorageMap<_, Blake2_128Concat, u64, Vec, OptionQuery>; + + /// Number of provers rewarded so far per parachain height. Used as the position + /// index when the next uncle lands. + #[pallet::storage] + pub type ProverCount = StorageMap<_, Blake2_128Concat, u64, u32, ValueQuery>; + + /// `keccak256(proof_bytes)` for every proof accepted at a given parachain height. + /// SP1 Groth16 randomizes the witness so independent provers produce different + /// bytes; re-submission of the exact same bytes hits this set and is rejected. + /// Bounded by `MaxUncleProvers + 1` (one first + `MaxUncleProvers` uncles). + #[pallet::storage] + pub type AcceptedProofHashes = + StorageMap<_, Blake2_128Concat, u64, BoundedVec>, ValueQuery>; + + /// Reward fractions `(numerator, denominator)` indexed by prover position. The base + /// reward [`ProofReward`] is multiplied by the fraction at the prover's position. + /// An empty curve falls back to `(1, 1)` for position 0 and zero for uncles, so the + /// pallet keeps the existing single-prover behaviour until an admin sets a curve. + /// Bounded by `MaxUncleProvers + 1`, matching the position range `0..=MaxUncleProvers`. 
+ #[pallet::storage] + pub type RewardCurve = + StorageValue<_, BoundedVec<(u32, u32), MaxStoredProvers>, ValueQuery>; + #[pallet::error] pub enum Error { /// Consensus state has not been initialized yet. NotInitialized, - /// Payload exceeds `MaxProofSize`. - ProofTooLarge, - /// `submitter` could not be interpreted as an SR25519 public key. - InvalidAccountId, - /// Signature did not verify against the signed message. - BadSignature, /// Proof is stale: `latest_height ≤ latest_state_machine_height`, or the proof rotated to /// an unexpected authority set. StaleProof, @@ -197,33 +235,47 @@ pub mod pallet { /// The proof does not advance state: no authority set rotation and no new /// messages since the last rewarded proof. NoNewWork, + /// `MaxUncleProvers` already accepted at this height. + UncleSlotsFull, + /// This exact proof has already been accepted at this height. + ProofAlreadySubmitted, + /// No first proof has been seen at this height, so no uncle slot exists. + NoUncleContext, + /// `set_reward_curve` received a fraction with a zero denominator. + InvalidRewardCurve, } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A proof was accepted and state advanced. + /// A first proof was accepted and state advanced to `height`. ProofAccepted { submitter: T::AccountId, height: u64, new_set_id: Option, - rewarded: <::Currency as Inspect>::Balance, + rewarded: BalanceOf, + }, + /// An uncle proof was accepted at `height` (which the first proof already + /// advanced state to). No state advance; reward only. + UncleProofAccepted { + submitter: T::AccountId, + height: u64, + rewarded: BalanceOf, + /// `1..=MaxUncleProvers`. Position 0 always belongs to the first proof. + position: u32, }, /// Consensus state was (re)initialized by admin. StateInitialized { current_set_id: u64, next_set_id: u64, latest_beefy_height: u32 }, /// Reward amount updated. 
- ProofRewardUpdated { - new_reward: <::Currency as Inspect>::Balance, - }, + ProofRewardUpdated { new_reward: BalanceOf }, /// SP1 verification key hash updated. Sp1VkeyHashUpdated, + /// Reward curve updated. + RewardCurveUpdated, } #[pallet::call] - impl Pallet - where - T::AccountId: Into<[u8; 32]>, - { + impl Pallet { /// Initialize or reset the BEEFY consensus state from its solidity-ABI encoding. #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::initialize_state())] @@ -273,55 +325,187 @@ pub mod pallet { Ok(()) } - /// Submit a BEEFY consensus proof. Unsigned; authenticated via the payload's - /// SR25519 signature. + /// Submit a BEEFY consensus proof. Signed: the signer is the reward payee. + /// + /// `proof` is a `BoundedVec` so SCALE decoding rejects oversized payloads inside + /// the txpool, before the runtime ever pays for the call. Successful proofs + /// (first or uncle) refund their transaction fee via `Pays::No`; failed proofs + /// pay the fee, which is the spam deterrent. #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::submit_proof())] pub fn submit_proof( origin: OriginFor, - payload: SubmitProofPayload, - signature: Signature, + proof: BoundedVec, + ) -> DispatchResultWithPostInfo { + let submitter = ensure_signed(origin)?; + Self::do_submit_proof(submitter, proof.into_inner()) + } + + /// Update the base reward amount paid to position-0 provers. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::set_proof_reward())] + pub fn set_proof_reward(origin: OriginFor, reward: BalanceOf) -> DispatchResult { + ::AdminOrigin::ensure_origin(origin)?; + ProofReward::::put(reward); + Self::deposit_event(Event::ProofRewardUpdated { new_reward: reward }); + Ok(()) + } + + /// Update the SP1 verification key hash. 
+ #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::set_sp1_vkey_hash())] + pub fn set_sp1_vkey_hash(origin: OriginFor, vkey_hash: Vec) -> DispatchResult { + ::AdminOrigin::ensure_origin(origin)?; + Sp1VkeyHash::::put(vkey_hash); + Self::deposit_event(Event::Sp1VkeyHashUpdated); + Ok(()) + } + + /// Set the decreasing reward curve. Position `i` gets `ProofReward * curve[i].0 / + /// curve[i].1`. Empty curve means default behaviour (position 0 = full reward, + /// no uncle rewards). Bounded by `MaxUncleProvers + 1` to match the storage, + /// covering position 0 (first proof) plus all uncle slots. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::set_reward_curve())] + pub fn set_reward_curve( + origin: OriginFor, + curve: BoundedVec<(u32, u32), MaxStoredProvers>, ) -> DispatchResult { - ensure_none(origin)?; + ::AdminOrigin::ensure_origin(origin)?; + // `numerator > denominator` would multiply the base reward above 100% for that + // position, turning a fat-fingered curve into a treasury drain. Reject it + // outright — uncle positions are meant to *decrease* from the position-0 base. + if curve.iter().any(|(num, denom)| *denom == 0 || num > denom) { + Err(Error::::InvalidRewardCurve)? + } + RewardCurve::::put(curve); + Self::deposit_event(Event::RewardCurveUpdated); + Ok(()) + } + } - let outcome = Self::verify_and_apply(&payload, &signature)?; + /// Outcome of a successful [`Pallet::verify_and_apply`] call. + pub struct VerifyOutcome { + /// Highest parachain height finalized by this proof (0 if none). + pub latest_height: u64, + /// `current_authorities.id` of the consensus state *after* the update. + pub current_set_id: u64, + /// True iff the proof rotated the current authority set. + pub rotated: bool, + /// True iff the child trie root changed since the last rewarded proof. + pub has_new_messages: bool, + /// Root of the child trie verified by this proof. 
+ pub child_trie_root: H256, + } + + impl Pallet { + /// Returns the latest proven parachain height from `pallet-ismp` for the + /// coprocessor state machine. + fn latest_height() -> u64 { + let host = pallet_ismp::Pallet::::default(); + let id = ismp::consensus::StateMachineId { + state_id: T::Coprocessor::get() + .expect("coprocessor must be set in hyperbridge runtime; qed"), + consensus_state_id: T::ConsensusStateId::get(), + }; + host.latest_commitment_height(id).unwrap_or_default() + } + + /// Top-level dispatch. On success, settle as the first proof; on any failure for + /// an SP1 proof, retry through the uncle path which runs its own cryptographic + /// check against the saved snapshot. + fn do_submit_proof(submitter: T::AccountId, proof: Vec) -> DispatchResultWithPostInfo { + // Size is enforced by the `BoundedVec` parameter on + // `submit_proof` — oversized payloads fail SCALE decoding inside the txpool + // and never reach this dispatch. + let proof_type = *proof.first().ok_or(Error::::UnknownProofType)?; + if !T::AllowedProofTypes::get().contains(&proof_type) { + Err(Error::::UnknownProofType)? + } + + // Decode the ABI payload then re-encode it canonically and hash *that* instead + // of the raw input. `alloy_sol_types::abi_decode_params` silently ignores + // trailing bytes after the encoded sequence ends, so without this a submitter + // could pad a valid proof with junk to mint a fresh `keccak256(proof)` and + // bypass the `AcceptedProofHashes` dedup. Hashing the canonical re-encoding + // collapses every ABI-equivalent input to the same hash by construction. 
+ let abi_payload = &proof[1..]; + let canonical_payload = + match proof_type { + types::PROOF_TYPE_SP1 => { + let p = + ::abi_decode_params( + abi_payload, + ) + .map_err(|_| Error::::AbiDecodeFailed)?; + ::abi_encode_params(&p) + }, + types::PROOF_TYPE_NAIVE => { + let p = + ::abi_decode_params( + abi_payload, + ) + .map_err(|_| Error::::AbiDecodeFailed)?; + ::abi_encode_params(&p) + }, + _ => Err(Error::::UnknownProofType)?, + }; + let mut canonical_proof = Vec::with_capacity(1 + canonical_payload.len()); + canonical_proof.push(proof_type); + canonical_proof.extend_from_slice(&canonical_payload); + let proof_hash: H256 = sp_io::hashing::keccak_256(&canonical_proof).into(); + + // Read the pre-proof consensus state before `verify_and_apply` mutates it. + // Used to seed `ProofContext` for the first-proof path. + let host = pallet_ismp::Pallet::::default(); + let prev_state_bytes = host + .consensus_state(ismp_beefy::BEEFY_CONSENSUS_ID) + .map_err(|_| Error::::NotInitialized)?; + + match Self::verify_and_apply(&canonical_proof) { + Ok(outcome) => Self::settle_first_proof( + submitter, + canonical_proof, + proof_hash, + proof_type, + prev_state_bytes, + outcome, + ), + // `verify_and_apply` returns `StaleProof` for SP1 proofs whose + // `block_number <= prev.latest_beefy_height` via the upfront height check, + // which is exactly the legitimate-uncle case. Other failures (corrupt + // bytes, bad signatures, wrong vkey) propagate so the submitter pays the + // fee instead of paying for a wasted second SP1 verification. + Err(Error::::StaleProof) if proof_type == types::PROOF_TYPE_SP1 => + Self::settle_uncle_proof(submitter, canonical_proof, proof_hash), + Err(e) => Err(e.into()), + } + } + + /// First-proof path: state has been advanced inside `verify_and_apply`. Save the + /// pre-proof snapshot, record the proof hash, pay the reward at position 0, and + /// run ring-buffer eviction across `MessagingProofs`/`RotationProofs`. 
When an + /// entry falls off either ring, prune the matching uncle rows. + fn settle_first_proof( + submitter: T::AccountId, + proof: Vec, + proof_hash: H256, + proof_type: u8, + prev_state_bytes: Vec, + outcome: VerifyOutcome, + ) -> DispatchResultWithPostInfo { if outcome.has_new_messages { LastRewardedDispatchRoot::::put(outcome.child_trie_root); } - let zero = <::Currency as Inspect>::Balance::default(); - let reward = ProofReward::::get(); - let reward_paid = if reward > zero { - let treasury: T::AccountId = - ::TreasuryPalletId::get().into_account_truncating(); - ::Currency::transfer( - &treasury, - &payload.submitter, - reward, - Preservation::Preserve, - ) - .map_err(|e| { - log::warn!( - target: "ismp", - "[beefy-consensus-proofs] treasury reward transfer failed: {e:?}", - ); - Error::::RewardTransferFailed - })?; - - // Mint reputation tokens 1:1 with the native reward - if let Err(e) = T::ReputationAsset::mint_into(&payload.submitter, reward) { - log::warn!( - target: "ismp", - "[beefy-consensus-proofs] reputation mint failed: {e:?}", - ); - } + // Record uncle metadata for SP1 proofs only. Naive proofs are ineligible. 
+ if proof_type == types::PROOF_TYPE_SP1 { + Self::record_uncle_metadata(outcome.latest_height, prev_state_bytes, proof_hash)?; + } - reward - } else { - zero - }; + let reward_paid = Self::pay_position_reward(&submitter, 0)?; - sp_io::offchain_index::set(&types::offchain_key(outcome.latest_height), &payload.proof); + sp_io::offchain_index::set(&types::offchain_key(outcome.latest_height), &proof); let evicted_height = if outcome.rotated { RotationProofs::::mutate(|map| { let evicted = (map.len() as u32 == T::MaxStoredProofs::get()) @@ -342,160 +526,203 @@ pub mod pallet { if let Some(height) = evicted_height { sp_io::offchain_index::clear(&types::offchain_key(height)); + ProofContext::::remove(height); + ProverCount::::remove(height); + AcceptedProofHashes::::remove(height); } Self::deposit_event(Event::ProofAccepted { - submitter: payload.submitter.clone(), + submitter, height: outcome.latest_height, new_set_id: outcome.rotated.then_some(outcome.current_set_id), rewarded: reward_paid, }); - Ok(()) + Ok(PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }) } - /// Update the fixed reward amount. - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_proof_reward())] - pub fn set_proof_reward( - origin: OriginFor, - reward: <::Currency as Inspect>::Balance, - ) -> DispatchResult { - ::AdminOrigin::ensure_origin(origin)?; - ProofReward::::put(reward); - Self::deposit_event(Event::ProofRewardUpdated { new_reward: reward }); - Ok(()) - } + /// Uncle path: the live consensus state is already past this proof's target + /// because a prior prover landed first. Look up the pre-proof snapshot saved at + /// that time and verify the SP1 proof directly against it. + fn settle_uncle_proof( + submitter: T::AccountId, + proof: Vec, + proof_hash: H256, + ) -> DispatchResultWithPostInfo { + // The first proof for the most-recent finality target advanced state to + // `latest_height` and snapshotted the pre-state under that key. 
Uncles that + // arrive while that race is still open look up the same key. If a different + // first proof has since bumped state past this height, the snapshot under + // `latest_height` is for the *newer* first proof; the uncle's lower + // `proof.block_number` will fail SP1 verification against it (StaleHeight), + // so a stale uncle pays the tx fee. + let parachain_height = Self::latest_height(); + + let snapshot_bytes = + ProofContext::::get(parachain_height).ok_or(Error::::NoUncleContext)?; + + // `ProverCount` is incremented after each successful uncle (the first proof + // is position 0). With `MaxUncleProvers = N`, valid uncle positions are + // `1..=N`, so reject once the position the next uncle would occupy exceeds N. + let position = ProverCount::::get(parachain_height); + if position > T::MaxUncleProvers::get() { + Err(Error::::UncleSlotsFull)? + } - /// Update the SP1 verification key hash. - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::set_sp1_vkey_hash())] - pub fn set_sp1_vkey_hash(origin: OriginFor, vkey_hash: Vec) -> DispatchResult { - ::AdminOrigin::ensure_origin(origin)?; - Sp1VkeyHash::::put(vkey_hash); - Self::deposit_event(Event::Sp1VkeyHashUpdated); - Ok(()) + let hashes = AcceptedProofHashes::::get(parachain_height); + if hashes.contains(&proof_hash) { + Err(Error::::ProofAlreadySubmitted)? + } + + // Verify the proof cryptographically against the saved trusted state. We + // don't apply state mutations because the live state is already past this point. 
+ let snapshot: beefy_verifier_primitives::ConsensusState = + Decode::decode(&mut &snapshot_bytes[..]).map_err(|_| Error::::NotInitialized)?; + + let abi_payload = &proof[1..]; + let abi_proof = + ::abi_decode_params( + abi_payload, + ) + .map_err(|_| Error::::AbiDecodeFailed)?; + let scale_proof: beefy_verifier_primitives::Sp1BeefyProof = abi_proof.into(); + + let vkey_bytes = Sp1VkeyHash::::get(); + let vkey = + core::str::from_utf8(&vkey_bytes).map_err(|_| Error::::VerificationFailed)?; + + beefy_verifier::sp1::verify_sp1_consensus::( + snapshot, + scale_proof, + vkey, + ) + .map_err(|e| { + log::debug!( + target: "ismp", + "[beefy-consensus-proofs] uncle SP1 verification failed: {e:?}", + ); + Error::::VerificationFailed + })?; + + let reward_paid = Self::pay_position_reward(&submitter, position)?; + + AcceptedProofHashes::::try_mutate(parachain_height, |vec| vec.try_push(proof_hash)) + .map_err(|_| Error::::UncleSlotsFull)?; + ProverCount::::insert(parachain_height, position.saturating_add(1)); + + Self::deposit_event(Event::UncleProofAccepted { + submitter, + height: parachain_height, + rewarded: reward_paid, + position, + }); + + Ok(PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }) } - } - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet - where - T::AccountId: Into<[u8; 32]>, - { - type Call = Call; + /// Save the pre-proof snapshot keyed by `parachain_height` and register the + /// first proof's hash. Uncles for this height land in the same rows; eviction + /// from `MessagingProofs`/`RotationProofs` removes them in lockstep. 
+ fn record_uncle_metadata( + parachain_height: u64, + prev_state_bytes: Vec, + proof_hash: H256, + ) -> Result<(), Error> { + ProofContext::::insert(parachain_height, prev_state_bytes); + + AcceptedProofHashes::::try_mutate(parachain_height, |vec| vec.try_push(proof_hash)) + .map_err(|_| Error::::UncleSlotsFull)?; + ProverCount::::mutate(parachain_height, |c| *c = c.saturating_add(1)); - // empty pre-dispatch so we don't modify storage - fn pre_dispatch(_call: &Self::Call) -> Result<(), TransactionValidityError> { Ok(()) } - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - let Call::submit_proof { payload, signature } = call else { - return Err(TransactionValidityError::Invalid(InvalidTransaction::Call)); - }; + /// Apply the curve at `position` to [`ProofReward`], transfer from the treasury, + /// and mint reputation 1:1. + fn pay_position_reward( + submitter: &T::AccountId, + position: u32, + ) -> Result, Error> { + let zero = BalanceOf::::default(); + let base = ProofReward::::get(); + if base == zero { + return Ok(zero); + } - // Single verification path: signature + handle_incoming_message (which itself - // runs the full BEEFY / SP1 check and persists state). In `validate_unsigned` - // the persistence happens in a discarded overlay; `submit_proof` re-runs this - // and the writes stick. - let outcome = Self::verify_and_apply(payload, signature).map_err(|e| { - log::debug!(target: "ismp", "validate_unsigned rejected: {e:?}"); - // Discriminate reject reasons with distinct Custom codes so tooling can - // tell why a proof was dropped without relying on log scraping. 
- let code: u8 = match e { - Error::::ProofTooLarge => 1, - Error::::InvalidAccountId => 2, - Error::::BadSignature => 3, - Error::::NotInitialized => 4, - Error::::UnknownProofType => 5, - Error::::AbiDecodeFailed => 6, - Error::::VerificationFailed => 7, - Error::::UnexpectedAuthoritySet => 8, - Error::::StaleProof => 9, - Error::::NoNewWork => 10, - _ => 0, - }; - TransactionValidityError::Invalid(InvalidTransaction::Custom(code)) - })?; + let reward = Self::position_reward(base, position); + if reward == zero { + return Ok(zero); + } - ValidTransaction::with_tag_prefix("BeefyConsensusProofs") - .longevity(PROOF_LONGEVITY) - .propagate(true) - .priority(outcome.latest_height) - .and_provides(types::PROOF_TAG.encode()) - .build() - } - } + let treasury: T::AccountId = + ::TreasuryPalletId::get().into_account_truncating(); + ::Currency::transfer(&treasury, submitter, reward, Preservation::Preserve) + .map_err(|e| { + log::warn!( + target: "ismp", + "[beefy-consensus-proofs] treasury reward transfer failed: {e:?}", + ); + Error::::RewardTransferFailed + })?; - /// Outcome of a successful [`Pallet::verify_and_apply`] call. - pub struct VerifyOutcome { - /// Highest parachain height finalized by this proof (0 if none). - pub latest_height: u64, - /// `current_authorities.id` of the consensus state *after* the update. - pub current_set_id: u64, - /// True iff the proof rotated the current authority set. - pub rotated: bool, - /// True iff the child trie root changed since the last rewarded proof. - pub has_new_messages: bool, - /// Root of the child trie verified by this proof. - pub child_trie_root: H256, - } + if let Err(e) = T::ReputationAsset::mint_into(submitter, reward) { + log::warn!( + target: "ismp", + "[beefy-consensus-proofs] reputation mint failed: {e:?}", + ); + } - impl Pallet - where - T::AccountId: Into<[u8; 32]>, - { - /// Returns the latest proven parachain height from `pallet-ismp` for the - /// coprocessor state machine. 
- fn latest_height() -> u64 { - let host = pallet_ismp::Pallet::::default(); - let id = ismp::consensus::StateMachineId { - state_id: T::Coprocessor::get() - .expect("coprocessor must be set in hyperbridge runtime; qed"), - consensus_state_id: T::ConsensusStateId::get(), + Ok(reward) + } + + /// Apply the configured curve to the base reward. Empty curve means full reward + /// at position 0 and zero at later positions, preserving pre-uncle behaviour. + /// + /// Uses a u128 round-trip for `* num / denom`. Hyperbridge runtimes use a u128 + /// balance, so the trip is lossless; on any runtime where balances exceed u128 + /// we'd saturate, which is acceptable since rewards are small relative to total + /// supply. + fn position_reward(base: BalanceOf, position: u32) -> BalanceOf { + let curve = RewardCurve::::get(); + if curve.is_empty() { + return if position == 0 { base } else { BalanceOf::::default() }; + } + let Some((num, denom)) = curve.get(position as usize).copied() else { + return BalanceOf::::default(); }; - host.latest_commitment_height(id).unwrap_or_default() + if denom == 0 { + return BalanceOf::::default(); + } + use sp_runtime::SaturatedConversion; + let base_u128: u128 = base.saturated_into(); + let scaled = base_u128.saturating_mul(num as u128).saturating_div(denom as u128); + scaled.saturated_into::>() } - /// Single verification path shared by `validate_unsigned` and `submit_proof`: + /// First-proof verification path: /// - /// 1. SR25519 signature check over the payload. - /// 2. ABI-decode the proof into the SCALE shape `ismp-beefy` consumes. - /// 3. Dispatch `Message::Consensus` through `ismp::handlers::handle_incoming_message` — - /// `pallet-ismp` routes to `BeefyConsensusClient::verify_consensus` which runs the full - /// BEEFY / SP1 check and persists consensus state + parachain commitments. - /// 4. Extract the proven parachain height from the returned `StateMachineUpdated` events + /// 1. 
ABI-decode the proof into the SCALE shape `ismp-beefy` consumes. + /// 2. Dispatch `Message::Consensus` through `ismp::handlers::handle_incoming_message`, + /// which routes to `BeefyConsensusClient::verify_consensus`. That runs the full BEEFY / + /// SP1 check and persists consensus state + parachain commitments. The verifier's own + /// upfront stale check (`beefy_verifier::error::Error::StaleHeight`) propagates back as + /// a `Custom(...)` ismp error here; we surface it as `StaleProof` so the dispatcher can + /// route an SP1 uncle to `settle_uncle_proof`. + /// 3. Extract the proven parachain height from the returned `StateMachineUpdated` events /// and the new authority-set id from the stored consensus state so the caller can /// classify the proof as rotation / messaging. - /// - /// Staleness rejection (messaging proofs must push height forward; rotation must - /// target the expected next set id) is enforced here so both `validate_unsigned` - /// (which runs this in a discarded overlay) and `submit_proof` (which persists) - /// share the same accept/reject decision. - pub fn verify_and_apply( - payload: &SubmitProofPayload, - signature: &Signature, - ) -> Result> { - // Size check. - if (payload.proof.len() as u32) > T::MaxProofSize::get() { - Err(Error::::ProofTooLarge)? - } - - // Signature. - let public = sr25519::Public::from(payload.submitter.clone().into()); - let proof_digest = sp_io::hashing::keccak_256(&payload.proof); - let msg_preimage = (types::SIGNATURE_DOMAIN, &payload.submitter, proof_digest).encode(); - let signed_msg = sp_io::hashing::keccak_256(&msg_preimage); - if !sp_io::crypto::sr25519_verify(signature, &signed_msg, &public) { - Err(Error::::BadSignature)? 
- } + pub fn verify_and_apply(proof: &[u8]) -> Result> { + let proof_type = *proof.first().ok_or(Error::::UnknownProofType)?; + let abi_payload = &proof[1..]; - let proof_type = *payload.proof.first().ok_or(Error::::UnknownProofType)?; - if !T::AllowedProofTypes::get().contains(&proof_type) { - Err(Error::::UnknownProofType)? - } - let abi_payload = &payload.proof[1..]; + let host = pallet_ismp::Pallet::::default(); + let prev_state_bytes = host + .consensus_state(ismp_beefy::BEEFY_CONSENSUS_ID) + .map_err(|_| Error::::NotInitialized)?; + let prev_state: beefy_verifier_primitives::ConsensusState = + Decode::decode(&mut &prev_state_bytes[..]) + .map_err(|_| Error::::NotInitialized)?; + let prev_height = Self::latest_height(); let consensus_proof = match proof_type { types::PROOF_TYPE_SP1 => { @@ -519,29 +746,38 @@ pub mod pallet { _ => Err(Error::::UnknownProofType)?, }; - // Hand off to pallet-ismp with SCALE-encoded proof for verification. - let host = pallet_ismp::Pallet::::default(); - let prev_state_bytes = host - .consensus_state(ismp_beefy::BEEFY_CONSENSUS_ID) - .map_err(|_| Error::::NotInitialized)?; - let prev_state: beefy_verifier_primitives::ConsensusState = - Decode::decode(&mut &prev_state_bytes[..]) - .map_err(|_| Error::::NotInitialized)?; - let prev_height = Self::latest_height(); let result = handlers::handle_incoming_message( &host, Message::Consensus(IsmpConsensusMessage { consensus_proof, consensus_state_id: ismp_beefy::BEEFY_CONSENSUS_ID, - signer: public.to_vec(), + signer: vec![], }), ) .map_err(|e| { log::warn!( target: "ismp", - "[beefy-consensus-proofs] handle_incoming_message failed: {e}", + "[beefy-consensus-proofs] handle_incoming_message failed: {e:?}", ); - Error::::VerificationFailed + // `BeefyConsensusClient::verify_consensus` wraps verifier failures as + // `ismp::Error::AnyHow(anyhow::Error)`, preserving the typed + // `beefy_verifier::Error` inside. 
Walk the chain — `anyhow::Error` (from + // `update_client`'s return type) → `ismp::Error::AnyHow` → + // `beefy_verifier::Error` — and route `StaleHeight` to the uncle path. + let stale = e + .downcast_ref::() + .and_then(|err| match err { + ismp::error::Error::AnyHow(any) => Some(&any.0), + _ => None, + }) + .and_then(|inner| inner.downcast_ref::()) + .map(|verr| matches!(verr, beefy_verifier::error::Error::StaleHeight { .. })) + .unwrap_or(false); + if stale { + Error::::StaleProof + } else { + Error::::VerificationFailed + } })?; // Highest parachain height finalized by this proof diff --git a/modules/pallets/beefy-consensus-proofs/src/types.rs b/modules/pallets/beefy-consensus-proofs/src/types.rs index 746f67a89..0dbdcd03b 100644 --- a/modules/pallets/beefy-consensus-proofs/src/types.rs +++ b/modules/pallets/beefy-consensus-proofs/src/types.rs @@ -15,32 +15,6 @@ //! Types for `pallet-beefy-consensus-proofs`. -use codec::{Decode, DecodeWithMemTracking, Encode}; -use scale_info::TypeInfo; -use sp_core::sr25519; - -/// Payload submitted via the `submit_proof` unsigned extrinsic. -/// -/// The signed message is `keccak256(("beefy_consensus_proof_v1", submitter, -/// keccak256(proof)).encode())`; the signature in the outer extrinsic is expected to -/// verify against `submitter` interpreted as an SR25519 public key. -/// -/// No nonce: replay is prevented by on-chain state progression. Once a proof is applied -/// `LastProvenHeight` / the BEEFY authority set id advance, and `verify_and_apply` then -/// rejects any resubmission of the same bytes with `StaleProof` or -/// `UnexpectedAuthoritySet`. -#[derive(Clone, Debug, Encode, Decode, DecodeWithMemTracking, TypeInfo, PartialEq, Eq)] -pub struct SubmitProofPayload { - /// The account that signed this payload and that will receive the reward (if any). - pub submitter: AccountId, - /// `bytes1 proof_type || abi-encoded proof body`, matching the wire format consumed by - /// `ConsensusRouter.verify` on the EVM side. 
- pub proof: alloc::vec::Vec, -} - -/// Domain separator for the signed message. -pub const SIGNATURE_DOMAIN: &[u8] = b"pallet_beefy_consensus_proofs"; - /// Offchain-storage prefix for raw verified proof bytes written by `submit_proof`. /// Combined with the `proven_height` (`u64`, big-endian) to form the actual offchain key. /// All proofs — rotation and messaging alike — share this single namespace since both @@ -52,15 +26,6 @@ pub const PROOF_TYPE_NAIVE: u8 = 0x00; /// Proof type byte: SP1 ZK BEEFY proof. pub const PROOF_TYPE_SP1: u8 = 0x01; -/// `provides` tag for BEEFY consensus proofs — a single fixed slot. At most one proof -/// is retained in the pool at a time; higher `proven_height` wins. Unified across -/// rotation and messaging proofs so that the pool never holds a rotation alongside a -/// messaging proof that would supersede it on inclusion. -pub const PROOF_TAG: &[u8] = b"beefy_consensus_proof"; - -/// Signature type expected alongside [`SubmitProofPayload`]. -pub type Signature = sr25519::Signature; - /// Offchain-storage key for a verified consensus proof keyed by `proven_height`. /// Relayers reconstruct this key off of a [`MessagingProofs`](crate::pallet::MessagingProofs) /// or [`RotationProofs`](crate::pallet::RotationProofs) entry and read the raw diff --git a/modules/pallets/beefy-consensus-proofs/src/weights.rs b/modules/pallets/beefy-consensus-proofs/src/weights.rs index ed55a2197..d51e10902 100644 --- a/modules/pallets/beefy-consensus-proofs/src/weights.rs +++ b/modules/pallets/beefy-consensus-proofs/src/weights.rs @@ -32,6 +32,8 @@ pub trait WeightInfo { fn set_proof_reward() -> Weight; /// Weight of `set_sp1_vkey_hash`. fn set_sp1_vkey_hash() -> Weight; + /// Weight of `set_reward_curve`. + fn set_reward_curve() -> Weight; } /// No-op [`WeightInfo`] for tests and genesis bootstrap. 
@@ -48,4 +50,7 @@ impl WeightInfo for () { fn set_sp1_vkey_hash() -> Weight { Weight::zero() } + fn set_reward_curve() -> Weight { + Weight::zero() + } } diff --git a/parachain/runtimes/gargantua/src/lib.rs b/parachain/runtimes/gargantua/src/lib.rs index c08f23395..75fff4293 100644 --- a/parachain/runtimes/gargantua/src/lib.rs +++ b/parachain/runtimes/gargantua/src/lib.rs @@ -769,10 +769,12 @@ parameter_types! { /// Unbonding period handed to `pallet-ismp` on first `initialize_state` (21 days in /// seconds), aligning with other BEEFY clients in the runtime. pub const BeefyUnbondingPeriod: u64 = 21 * 24 * 60 * 60; - /// Maximum SCALE-encoded size of a `SubmitProofPayload`. + /// Maximum size in bytes of a single proof passed to `submit_proof`. pub const MaxBeefyProofSize: u32 = 1_048_576; /// Per-bucket ring-buffer size for `MessagingProofs` and `RotationProofs`. pub const MaxStoredBeefyProofs: u32 = 512; + /// Maximum number of unique provers rewarded per BEEFY block (first + uncles). + pub const MaxBeefyUncleProvers: u32 = 5; } parameter_types! { @@ -791,6 +793,7 @@ impl pallet_beefy_consensus_proofs::Config for Runtime { type ConsensusStateId = BeefyConsensusStateId; type UnbondingPeriod = BeefyUnbondingPeriod; type AllowedProofTypes = AllowedBeefyProofTypes; + type MaxUncleProvers = MaxBeefyUncleProvers; type ReputationAsset = ReputationAsset; type WeightInfo = weights::pallet_beefy_consensus_proofs::WeightInfo; } diff --git a/parachain/runtimes/gargantua/src/weights/pallet_beefy_consensus_proofs.rs b/parachain/runtimes/gargantua/src/weights/pallet_beefy_consensus_proofs.rs index 68cb7be98..4598e0da3 100644 --- a/parachain/runtimes/gargantua/src/weights/pallet_beefy_consensus_proofs.rs +++ b/parachain/runtimes/gargantua/src/weights/pallet_beefy_consensus_proofs.rs @@ -14,19 +14,31 @@ // limitations under the License. -//! Weights for `pallet_beefy_consensus_proofs`. +//! Autogenerated weights for `pallet_beefy_consensus_proofs` //! -//! 
`submit_proof`, `set_proof_reward` and `set_sp1_vkey_hash` use numbers ported from -//! the `pallet_outbound_proofs` benchmark run on an AMD Ryzen Threadripper PRO -//! 5995WX (2026-04-18, wasm-execution=compiled, steps=50, repeat=20). The pallet was -//! subsequently renamed and its extrinsic surface redesigned (submit_proof is now -//! unsigned + SR25519-authed, `initialize_state` was added), so these numbers are a -//! close-but-not-exact starting point — regenerate once benchmarks are wired into CI. -//! -//! Original per-bench numbers: -//! submit_proof ~669ms 5r/2w (dominated by SP1 verification) -//! set_proof_reward ~8.7µs 0r/1w -//! set_sp1_vkey_hash ~4.8µs 0r/1w +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 53.0.0 +//! DATE: 2026-04-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `polytope-labs`, CPU: `AMD Ryzen Threadripper PRO 5995WX 64-Cores` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_beefy_consensus_proofs +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --unsafe-overwrite-results +// --genesis-builder-preset=development +// --template=./scripts/template.hbs +// --genesis-builder=runtime +// --runtime=./target/release/wbuild/gargantua-runtime/gargantua_runtime.compact.wasm +// --output +// parachain/runtimes/gargantua/src/weights/pallet_beefy_consensus_proofs.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -38,46 +50,90 @@ use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; /// Weight functions for `pallet_beefy_consensus_proofs`. 
+/// +/// `submit_proof` is benchmarked along the single-SP1-verification worst case: the live +/// consensus state is seeded so that when `verify_and_apply` calls +/// `BeefyConsensusClient::verify_consensus`, the inner SP1 verifier returns +/// `StaleHeight` *before* doing any cryptographic work. The pallet maps that error to +/// `StaleProof`, dispatch routes to `settle_uncle_proof`, and SP1 runs once there +/// against the pre-seeded snapshot. This bounds both the uncle path (single SP1 + +/// uncle writes) and the first-proof path (single SP1 + first-proof writes). See +/// `benchmarking.rs::submit_proof`. pub struct WeightInfo(PhantomData); impl pallet_beefy_consensus_proofs::WeightInfo for WeightInfo { - /// Storage: `Ismp::ConsensusStates` (r:1 w:1) + /// Storage: `Ismp::ConsensusStates` (r:1 w:0) + /// Proof: `Ismp::ConsensusStates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Ismp::LatestStateMachineHeight` (r:1 w:0) + /// Proof: `Ismp::LatestStateMachineHeight` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Ismp::ConsensusStateClient` (r:1 w:0) + /// Proof: `Ismp::ConsensusStateClient` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Ismp::FrozenConsensusClients` (r:1 w:0) + /// Proof: `Ismp::FrozenConsensusClients` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Timestamp::Now` (r:1 w:0) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Ismp::UnbondingPeriod` (r:1 w:0) + /// Proof: `Ismp::UnbondingPeriod` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Ismp::ConsensusClientUpdateTime` (r:1 w:0) + /// Proof: `Ismp::ConsensusClientUpdateTime` (`max_values`: None, `max_size`: None, 
mode: `Measured`) /// Storage: `BeefyConsensusProofs::Sp1VkeyHash` (r:1 w:0) - /// Storage: `BeefyConsensusProofs::LastProvenHeight` (r:1 w:1) - /// Storage: `BeefyConsensusProofs::LastRewardedDispatchRoot` (r:1 w:1) - /// Storage: `BeefyConsensusProofs::RecentProofs` (r:1 w:1) + /// Proof: `BeefyConsensusProofs::Sp1VkeyHash` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BeefyConsensusProofs::ProofContext` (r:1 w:0) + /// Proof: `BeefyConsensusProofs::ProofContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `BeefyConsensusProofs::ProverCount` (r:1 w:1) + /// Proof: `BeefyConsensusProofs::ProverCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `BeefyConsensusProofs::AcceptedProofHashes` (r:1 w:1) + /// Proof: `BeefyConsensusProofs::AcceptedProofHashes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `BeefyConsensusProofs::ProofReward` (r:1 w:0) + /// Proof: `BeefyConsensusProofs::ProofReward` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn submit_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `4012` - // Minimum execution time: 669_751_633_000 picoseconds. - Weight::from_parts(694_774_527_000, 0) - .saturating_add(Weight::from_parts(0, 4012)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `952` + // Estimated: `4417` + // Minimum execution time: 601_332_542_000 picoseconds. + Weight::from_parts(608_064_578_000, 0) + .saturating_add(Weight::from_parts(0, 4417)) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `BeefyConsensusProofs::ProofReward` (r:0 w:1) + /// Proof: `BeefyConsensusProofs::ProofReward` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_proof_reward() -> Weight { - // Minimum execution time: 8_696_000 picoseconds. 
- Weight::from_parts(9_027_000, 0) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_867_000 picoseconds. + Weight::from_parts(9_187_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `BeefyConsensusProofs::Sp1VkeyHash` (r:0 w:1) + /// Proof: `BeefyConsensusProofs::Sp1VkeyHash` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_sp1_vkey_hash() -> Weight { - // Minimum execution time: 4_779_000 picoseconds. - Weight::from_parts(5_490_000, 0) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_558_000 picoseconds. + Weight::from_parts(9_939_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `BeefyConsensusProofs::RewardCurve` (r:0 w:1) + /// Proof: `BeefyConsensusProofs::RewardCurve` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_reward_curve() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_388_000 picoseconds. + Weight::from_parts(9_929_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Ismp::ConsensusStateClient` (r:1 w:1) - /// Storage: `Ismp::ConsensusStates` (r:0 w:1) - /// Storage: `Ismp::UnbondingPeriod` (r:0 w:1) - /// Storage: `Ismp::ConsensusClientUpdateTime` (r:0 w:1) - /// Storage: `BeefyConsensusProofs::LastProvenHeight` (r:0 w:1) - /// Storage: `BeefyConsensusProofs::LastRewardedDispatchRoot` (r:0 w:1) fn initialize_state() -> Weight { // Approximated: similar cost class to `set_sp1_vkey_hash`, plus several writes. + // `initialize_state` has no benchmark because it requires an ABI-encoded + // `BeefyConsensusState` fixture. 
Weight::from_parts(20_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().reads(1)) diff --git a/parachain/simtests/Cargo.toml b/parachain/simtests/Cargo.toml index 1fb19ca90..ee2e697b0 100644 --- a/parachain/simtests/Cargo.toml +++ b/parachain/simtests/Cargo.toml @@ -35,6 +35,10 @@ pallet-ismp-rpc = { workspace = true } pallet-intents-rpc = { workspace = true } pallet-intents-coprocessor = { workspace = true, default-features = true } ismp-parachain = { workspace = true, default-features = true } +beefy-prover = { workspace = true } +beefy-verifier-primitives = { workspace = true, default-features = true } +ismp-solidity-abi = { workspace = true, default-features = true } +alloy-sol-types = { workspace = true, default-features = true } merkle-mountain-range = { workspace = true } jsonrpsee-core = { workspace = true, features = ["client"] } trie-db = { workspace = true } @@ -59,6 +63,8 @@ features = [ "sp-keyring", "sc-consensus-manual-seal", "sp-mmr-primitives", + "sp-consensus-beefy", + "sp-io", "pallet-sudo", "pallet-utility", "pallet-vesting", diff --git a/parachain/simtests/src/lib.rs b/parachain/simtests/src/lib.rs index a39bfad36..f17e84f86 100644 --- a/parachain/simtests/src/lib.rs +++ b/parachain/simtests/src/lib.rs @@ -2,5 +2,6 @@ mod hyperbridge_client; mod intents_rpc; mod legacy_storage_items_state_drain_test; mod migration_test; +mod pallet_beefy_consensus_proofs; mod pallet_ismp; mod pallet_mmr; diff --git a/parachain/simtests/src/pallet_beefy_consensus_proofs.rs b/parachain/simtests/src/pallet_beefy_consensus_proofs.rs new file mode 100644 index 000000000..bac807c8c --- /dev/null +++ b/parachain/simtests/src/pallet_beefy_consensus_proofs.rs @@ -0,0 +1,762 @@ +//! Simnode tests for `pallet-beefy-consensus-proofs`. +//! +//! Three tiers: +//! +//! 1. Admin and validation surface that doesn't need a live BEEFY relay: `set_proof_reward`, +//! 
`set_sp1_vkey_hash`, `set_reward_curve` happy paths, `set_reward_curve` validation (zero +//! denominator, oversized vec), and `submit_proof` extrinsic-boundary rejections (unsigned +//! origin, oversized payload, unknown proof-type byte, malformed naive bytes). +//! 2. Naive happy-path proof flow against a live Paseo relay: build a real BEEFY proof for +//! parachain id 4009, initialize trusted state on simnode, submit the proof and assert state +//! advance + `ProofAccepted` event. Mirrors +//! `modules/pallets/testsuite/src/tests/pallet_ismp_beefy.rs::setup` but drives the live runtime +//! through `submit_proof` rather than calling the consensus client directly. Reads +//! `RELAY_WS_URL` / `PARA_WS_URL` env vars. +//! 3. SP1 uncle dispatch path: mirrors the bench setup in +//! `modules/pallets/beefy-consensus-proofs/src/benchmarking.rs::submit_proof` to exercise +//! `settle_uncle_proof` end-to-end. Forces the live BEEFY consensus state ahead of the SP1 +//! fixture proof's block number (so the verifier returns `StaleHeight`), seeds `ProofContext` +//! with the older snapshot the SP1 verifier accepts, then submits the fixture proof from Bob +//! (uncle accept at position 0) and re-submits the identical bytes from Ferdie (rejected by +//! `AcceptedProofHashes` dedup with `ProofAlreadySubmitted`). The multi-position fan-out is +//! covered by the bench rather than here — generating multiple distinct valid SP1 proofs +//! requires running `polytope-labs/sp1-beefy` once per fixture. No live network access. 
+ +#![cfg(test)] + +use std::{ + env, + time::{SystemTime, UNIX_EPOCH}, +}; + +use alloy_sol_types::SolType; +use anyhow::anyhow; +use codec::{Decode, Encode}; +use polkadot_sdk::{ + sp_consensus_beefy::{self, ecdsa_crypto::Signature, VersionedFinalityProof}, + sp_io::hashing::{blake2_128, keccak_256, twox_128, twox_64}, + *, +}; +use sc_consensus_manual_seal::CreatedBlock; +use sp_core::{crypto::Ss58Codec, Bytes}; +use sp_keyring::sr25519::Keyring; +use subxt::{ + backend::legacy::LegacyRpcMethods, + dynamic::Value, + error::RpcError, + ext::subxt_rpcs::{rpc_params, RpcClient}, + tx::SubmittableTransaction, + OnlineClient, PolkadotConfig, +}; +use subxt_utils::{values::storage_kv_list_to_value, Hyperbridge}; + +use beefy_prover::{ + relay::{fetch_mmr_proof, paras_parachains}, + rs_merkle::MerkleTree, + util::{hash_authority_addresses, MerkleHasher}, + Prover, +}; +use beefy_verifier_primitives::{ + ConsensusMessage, ConsensusState, MmrProof, ParachainHeader, ParachainProof, + SignatureWithAuthorityIndex, SignedCommitment as BvpSignedCommitment, +}; +use ismp_solidity_abi::beefy::{ + BeefyConsensusProof as SolBeefyConsensusProof, BeefyConsensusState as SolBeefyConsensusState, +}; +use primitive_types::H256; + +const PROOF_TYPE_NAIVE: u8 = 0; +const UNKNOWN_PROOF_TYPE: u8 = 0xFF; +/// Matches `MaxBeefyProofSize` in the gargantua runtime config. +const MAX_PROOF_SIZE: usize = 256 * 1024; +/// Matches `MaxBeefyUncleProvers` in the gargantua runtime; the storage cap +/// (`MaxStoredProvers`) is one larger. +const MAX_UNCLE_PROVERS: usize = 5; + +/// `ConsensusClientId` for BEEFY (`b"BEEF"`); duplicated here because pulling +/// `ismp-beefy` into simtests just for this constant is excessive. +const BEEFY_CONSENSUS_ID: [u8; 4] = *b"BEEF"; + +/// SCALE-encoded `beefy_verifier_primitives::ConsensusState` for the SP1 Groth16 fixture +/// used in `evm/tests/foundry/SP1BeefyTest.sol::testVerifySp1Optional`. 
The first 4 bytes +/// (`latest_beefy_height` LE) decode to 30_832_930 = 0x01d67922, which is below the +/// fixture proof's `blockNumber = 0x01d6792a`. Used as the pre-proof snapshot the SP1 +/// verifier accepts inside the uncle path. Mirrors `TRUSTED_STATE_SCALE` in +/// `modules/pallets/beefy-consensus-proofs/src/benchmarking.rs`. +const TRUSTED_STATE_SCALE: [u8; 128] = hex_literal::hex!("2279d60118532a010000000000000000000000000000000000000000000000000000000000000000751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49761200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49"); + +/// Same fixture as `TRUSTED_STATE_SCALE` but with `latest_beefy_height` bumped to +/// 30_832_938 = 0x01d6792a (first byte `22` → `2a`), which equals the fixture proof's +/// `blockNumber`. Stored as the live BEEFY consensus state so dispatch hits the SP1 +/// verifier's own `StaleHeight` short-circuit and the pallet routes the proof to +/// `settle_uncle_proof`. Mirrors `LIVE_STATE_SCALE` in `benchmarking.rs`. +const LIVE_STATE_SCALE: [u8; 128] = hex_literal::hex!("2a79d60118532a010000000000000000000000000000000000000000000000000000000000000000751200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49761200000000000057020000a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f49"); + +/// Wire-format proof: `[PROOF_TYPE_SP1] ++ abi.encode(SP1BeefyProof)` (without the outer +/// struct offset, matching what `::abi_decode_params` accepts). +/// ABI bytes lifted verbatim from `SP1BeefyTest.sol::testVerifySp1Optional`. Mirrors +/// `WIRE_PROOF` in `benchmarking.rs`. 
+const SP1_WIRE_PROOF: [u8; 1249] = hex_literal::hex!("010000000000000000000000000000000000000000000000000000000001d6792a000000000000000000000000000000000000000000000000000000000000127500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001d67929e1dbc67b9da4b90227fb3dc2e7ffdce4e120d583502399e4bd083c02651ca5eb00000000000000000000000000000000000000000000000000000000000012760000000000000000000000000000000000000000000000000000000000000257a7161e52f2f4249039441385a41c6c8e36207a9b6a65d9bfae4272156ec31f4963bc2eb07f9c83afe64eb8815b626cd0a7d2a1bbb4630a44a1896af297d0135d00000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000d2700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000139739e9bd7f1addf87db9b6a762bd0e1713baa895c3b82b4595080e5ba02fb5b3cf2915702b49122c32b822e6a11384074d8902d5ea5f79c7cb0d7804e49501b8b532298f49e38d3f7140ce1ba61c243152e4e380b37eb628e08d5270d8b2c5e4ebedd84bb14066175726120fbc4d208000000000452505352902a869d4e00b3bb93f1e88e41a2b5f51fc637626b4ce1da15749ef2d79de4797a9ae459070449534d50010118a13886ac93d163a1d22cdef94e018eba5189424a66b7bd03a5ac232beb46bf08b0f9d2b979fff833d7e21a64a5183c61e2630c0b452236baba3c1b4ff41821044953544d20ca3be169000000000561757261010152d45dea4dcf058b0610e12981e0e4c97ad153f26481510c0b78beedf1848b4dd2abd37b8c6b800b72fa12199898eca7651471b49e38d6167a84fb6e2df7c7840000000000000000000000000000000000000000000000000000000000000000000000000001644388a21c0000000000000000000000000000000000000000000000000000000000000000002f850ee998974d6cc00e50cd0814b098c05bfade466d28573240d057f2535200000000000000000000000000000000000000000000000000000000000000002ac5e596c552ee76353c176f0870e47a0aa765ceafc4c65b03dbf434e27fa9062f185bdc40f7aae982c1c8c6b766dd491a1e1cd60128efbc58da965e5be96320287f4ce1b04538f0c8287c8eff096c36df67dc17970032546c9b3d4dd5510c5c25e880e13469e1e1aca1b41c367f2ecf04da65f7602fb53ec212b03d0148157b2cd9a79a9779f350d240e6d4c980848302fca8c7447c5fa7ac8d3c6eefcd0c640acff8b27ea316db978652553e3d054765094cf0dab6085a616489cdb973c42b258e22f346ac3ceb3e2e6750c37dad1f98f6ca15d1f70659343caa52dbbcad150b75dd2dcf0ba0a664ea4605b291df54ab1aa5b4c55034b9425ba29cc87eca7b00000000000000000000000000000000000000000000000000000000");
+
+/// SP1 verification key the fixture proof was generated against.
+const SP1_FIXTURE_VKEY: &[u8] =
+	b"0x0059fd0bff44da77999bb7974cbcf2ac7dc89e5869352f20a2f3cd46c9f53d5c";
+
+/// Storage-key builder for a `Twox64Concat` map (`twox_128(pallet) ++ twox_128(item) ++
+/// twox_64(key) ++ key`).
+fn twox_64_concat_key(pallet: &[u8], item: &[u8], key: &[u8]) -> Vec<u8> {
+	[twox_128(pallet).as_slice(), twox_128(item).as_slice(), twox_64(key).as_slice(), key].concat()
+}
+
+/// Storage-key builder for a `Blake2_128Concat` map (`twox_128(pallet) ++ twox_128(item)
+/// ++ blake2_128(key) ++ key`).
+fn blake2_128_concat_key(pallet: &[u8], item: &[u8], key: &[u8]) -> Vec<u8> {
+	[twox_128(pallet).as_slice(), twox_128(item).as_slice(), blake2_128(key).as_slice(), key]
+		.concat()
+}
+
+/// Build a `(numerator, denominator)` value as `subxt` expects for the
+/// `set_reward_curve` argument: a `BoundedVec<(u32, u32), _>`.
+fn fraction_value(num: u32, denom: u32) -> Value {
+	Value::unnamed_composite(vec![Value::u128(num as u128), Value::u128(denom as u128)])
+}
+
+fn curve_value(fractions: &[(u32, u32)]) -> Value {
+	Value::unnamed_composite(
+		fractions.iter().map(|(n, d)| fraction_value(*n, *d)).collect::<Vec<_>>(),
+	)
+}
+
+/// Submit a sudo-wrapped call signed by Alice (the simnode sudo key) and wait
+/// for finalization. Returns the dispatch result so callers can assert on
+/// success / failure of the inner call.
+async fn submit_sudo(
+	client: &OnlineClient<Hyperbridge>,
+	rpc_client: &RpcClient,
+	inner: subxt::tx::DynamicPayload,
+) -> Result<(), anyhow::Error> {
+	let sudo_call = subxt::dynamic::tx("Sudo", "sudo", vec![inner.into_value()]);
+	submit_signed(client, rpc_client, sudo_call, Keyring::Alice).await
+}
+
+async fn submit_signed(
+	client: &OnlineClient<Hyperbridge>,
+	rpc_client: &RpcClient,
+	call: subxt::tx::DynamicPayload,
+	signer: Keyring,
+) -> Result<(), anyhow::Error> {
+	let call_data = client.tx().call_data(&call)?;
+	let extrinsic: Bytes = rpc_client
+		.request(
+			"simnode_authorExtrinsic",
+			rpc_params![Bytes::from(call_data), signer.to_account_id().to_ss58check()],
+		)
+		.await
+		.map_err(|err| anyhow!("simnode_authorExtrinsic failed: {err:?}"))?;
+	let submittable = SubmittableTransaction::from_bytes(client.clone(), extrinsic.0);
+	let progress = submittable.submit_and_watch().await?;
+	let block = rpc_client
+		.request::<CreatedBlock<H256>>("engine_createBlock", rpc_params![true, false])
+		.await?;
+	let finalized = rpc_client
+		.request::<bool>("engine_finalizeBlock", rpc_params![block.hash])
+		.await?;
+	assert!(finalized);
+	progress.wait_for_finalized_success().await?;
+	Ok(())
+}
+
+/// Fetch a value-storage item for `pallet-beefy-consensus-proofs`.
+async fn fetch_storage<T: Decode>(
+	client: &OnlineClient<Hyperbridge>,
+	item: &str,
+) -> Result<Option<T>, anyhow::Error> {
+	let addr = subxt::dynamic::storage("BeefyConsensusProofs", item, ());
+	let raw = client.storage().at_latest().await?.fetch(&addr).await?;
+	let Some(value) = raw else { return Ok(None) };
+	let bytes = value.encoded();
+	let decoded =
+		T::decode(&mut &bytes[..]).map_err(|e| anyhow!("decoding {item} failed: {e:?}"))?;
+	Ok(Some(decoded))
+}
+
+/// Fetch raw storage bytes by precomputed key, decoding as `T`. Used when the key needs a
+/// hashing scheme `subxt::dynamic::storage`'s metadata bridge can't easily express.
+async fn fetch_storage_by_key<T: Decode>(
+	client: &OnlineClient<Hyperbridge>,
+	key: &[u8],
+) -> Result<Option<T>, anyhow::Error> {
+	let raw = client.storage().at_latest().await?.fetch_raw(key).await?;
+	let Some(bytes) = raw else { return Ok(None) };
+	let decoded =
+		T::decode(&mut &bytes[..]).map_err(|e| anyhow!("decoding raw storage failed: {e:?}"))?;
+	Ok(Some(decoded))
+}
+
+#[tokio::test]
+#[ignore]
+async fn test_admin_extrinsics_and_submit_proof_validation() -> Result<(), anyhow::Error> {
+	let port = env::var("PORT").unwrap_or_else(|_| "9990".into());
+	let url = format!("ws://127.0.0.1:{port}");
+	let (client, rpc_client) =
+		subxt_utils::client::ws_client::<Hyperbridge>(&url, u32::MAX).await?;
+
+	// 1. set_proof_reward via Sudo, expect storage updated.
+	let reward: u128 = 12_345_000;
+	let call =
+		subxt::dynamic::tx("BeefyConsensusProofs", "set_proof_reward", vec![Value::u128(reward)]);
+	submit_sudo(&client, &rpc_client, call).await?;
+	let on_chain: u128 = fetch_storage::<u128>(&client, "ProofReward")
+		.await?
+		.ok_or_else(|| anyhow!("ProofReward unset after set_proof_reward"))?;
+	assert_eq!(on_chain, reward);
+
+	// 2. set_sp1_vkey_hash via Sudo, expect storage updated.
+	let vkey: Vec<u8> =
+		b"0x0059fd0bff44da77999bb7974cbcf2ac7dc89e5869352f20a2f3cd46c9f53d5c".to_vec();
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"set_sp1_vkey_hash",
+		vec![Value::from_bytes(&vkey)],
+	);
+	submit_sudo(&client, &rpc_client, call).await?;
+	let on_chain_vkey: Vec<u8> = fetch_storage::<Vec<u8>>(&client, "Sp1VkeyHash")
+		.await?
+		.ok_or_else(|| anyhow!("Sp1VkeyHash unset after set_sp1_vkey_hash"))?;
+	assert_eq!(on_chain_vkey, vkey);
+
+	// 3. set_reward_curve via Sudo with the suggested mainnet defaults (1,1), (4,5), (3,5), (2,5),
+	// (1,5) — covers position 0..=4.
+	let curve: Vec<(u32, u32)> = vec![(1, 1), (4, 5), (3, 5), (2, 5), (1, 5)];
+	let call =
+		subxt::dynamic::tx("BeefyConsensusProofs", "set_reward_curve", vec![curve_value(&curve)]);
+	submit_sudo(&client, &rpc_client, call).await?;
+	let on_chain_curve: Vec<(u32, u32)> = fetch_storage::<Vec<(u32, u32)>>(&client, "RewardCurve")
+		.await?
+		.ok_or_else(|| anyhow!("RewardCurve unset after set_reward_curve"))?;
+	assert_eq!(on_chain_curve, curve);
+
+	// 4. set_reward_curve with a zero denominator.
+	let bad_curve: Vec<(u32, u32)> = vec![(1, 0)];
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"set_reward_curve",
+		vec![curve_value(&bad_curve)],
+	);
+	submit_sudo(&client, &rpc_client, call).await?;
+	let unchanged_curve: Vec<(u32, u32)> = fetch_storage::<Vec<(u32, u32)>>(&client, "RewardCurve")
+		.await?
+		.ok_or_else(|| anyhow!("RewardCurve unexpectedly cleared"))?;
+	assert_eq!(
+		unchanged_curve, curve,
+		"zero-denominator curve must not overwrite the existing curve",
+	);
+
+	// 4b. set_reward_curve with numerator > denominator — would multiply the base reward
+	// above 100% and could drain the treasury on a fat-finger.
+	let over_unity: Vec<(u32, u32)> = vec![(1, 1), (3, 2)];
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"set_reward_curve",
+		vec![curve_value(&over_unity)],
+	);
+	submit_sudo(&client, &rpc_client, call).await?;
+	let unchanged_curve: Vec<(u32, u32)> = fetch_storage::<Vec<(u32, u32)>>(&client, "RewardCurve")
+		.await?
+		.ok_or_else(|| anyhow!("RewardCurve unexpectedly cleared"))?;
+	assert_eq!(
+		unchanged_curve, curve,
+		"numerator > denominator curve must not overwrite the existing curve",
+	);
+
+	// 5. set_reward_curve oversized vec.
+	// With `MaxUncleProvers = 5` the storage cap is `MaxStoredProvers = 6`;
+	let oversized_curve: Vec<(u32, u32)> =
+		(1..=(MAX_UNCLE_PROVERS as u32 + 2)).map(|i| (1, i)).collect();
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"set_reward_curve",
+		vec![curve_value(&oversized_curve)],
+	);
+	let result = submit_sudo(&client, &rpc_client, call).await;
+	assert!(
+		result.is_err() ||
+			fetch_storage::<Vec<(u32, u32)>>(&client, "RewardCurve")
+				.await?
+				.unwrap_or_default() ==
+				curve,
+		"oversized curve should not overwrite the previously stored curve",
+	);
+
+	// 6. submit_proof oversized payload — `proof: BoundedVec` rejects at the
+	// txpool decode stage, before dispatch. We send `MaxProofSize + 1` bytes prefixed with
+	// `PROOF_TYPE_NAIVE`.
+	let mut oversized_proof = vec![PROOF_TYPE_NAIVE; MAX_PROOF_SIZE + 1];
+	oversized_proof[0] = PROOF_TYPE_NAIVE;
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"submit_proof",
+		vec![Value::from_bytes(&oversized_proof)],
+	);
+	let result = submit_signed(&client, &rpc_client, call, Keyring::Bob).await;
+	assert!(result.is_err(), "oversized submit_proof must be rejected by the BoundedVec decode",);
+
+	// 7. submit_proof with an unknown proof-type byte.
+	let unknown_proof = vec![UNKNOWN_PROOF_TYPE; 64];
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"submit_proof",
+		vec![Value::from_bytes(&unknown_proof)],
+	);
+	let result = submit_signed(&client, &rpc_client, call, Keyring::Bob).await;
+	assert!(result.is_err(), "unknown proof-type submit_proof must fail (UnknownProofType)",);
+
+	// 8. submit_proof with malformed naive bytes. The byte 0 marks `PROOF_TYPE_NAIVE`, the rest is
+	// junk that won't ABI-decode as `BeefyConsensusProof`. Expect `AbiDecodeFailed`.
+	let mut malformed_naive = vec![0u8; 128];
+	malformed_naive[0] = PROOF_TYPE_NAIVE;
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"submit_proof",
+		vec![Value::from_bytes(&malformed_naive)],
+	);
+	let result = submit_signed(&client, &rpc_client, call, Keyring::Bob).await;
+	assert!(result.is_err(), "malformed naive proof must fail (AbiDecodeFailed)",);
+
+	// 9. submit_proof rejects an unsigned origin. We try to author the same call as an unsigned
+	// extrinsic and expect the txpool / runtime to refuse it (the pallet only accepts
+	// `ensure_signed`).
+	let call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"submit_proof",
+		vec![Value::from_bytes(&malformed_naive)],
+	);
+	let unsigned_result = client.tx().create_unsigned(&call)?.submit_and_watch().await;
+	let Err(subxt::Error::Rpc(RpcError::ClientError(_))) = unsigned_result else {
+		panic!("unsigned submit_proof should have been rejected, got {unsigned_result:?}");
+	};
+
+	Ok(())
+}
+
+/// Walk back from `latest_beefy_hash` until we find a parent block that also carries
+/// a BEEFY justification. We use that parent as the trusted-state anchor so the
+/// proof at `latest_beefy_hash` is guaranteed to advance state. Mirrors the lookup
+/// in `modules/pallets/testsuite/src/tests/pallet_ismp_beefy.rs::setup`.
+async fn previous_beefy_anchor(
+	relay_rpc: &LegacyRpcMethods<PolkadotConfig>,
+	latest_beefy_hash: H256,
+) -> Result<H256, anyhow::Error> {
+	let mut current_hash = latest_beefy_hash;
+	for _ in 0..1000 {
+		let header = relay_rpc
+			.chain_get_header(Some(current_hash.into()))
+			.await?
+			.ok_or_else(|| anyhow!("missing header at {current_hash:?}"))?;
+		let parent_hash: H256 = header.parent_hash.into();
+		let block = relay_rpc
+			.chain_get_block(Some(parent_hash.into()))
+			.await?
+			.ok_or_else(|| anyhow!("missing block at {parent_hash:?}"))?;
+		if let Some(justifications) = block.justifications {
+			if justifications.iter().any(|j| j.0 == sp_consensus_beefy::BEEFY_ENGINE_ID) {
+				return Ok(parent_hash);
+			}
+		}
+		current_hash = parent_hash;
+	}
+	Err(anyhow!("no prior BEEFY justification found within 1000 blocks"))
+}
+
+/// Build a real BEEFY consensus proof for parachain id 4009 against a live Paseo
+/// relay + the gargantua-paseo parachain. Returns the trusted state anchored at
+/// the previous BEEFY-justified block and the consensus message that advances
+/// state to the latest BEEFY-finalized head.
+async fn build_live_naive_proof() -> Result<(ConsensusState, ConsensusMessage), anyhow::Error> {
+	let max_rpc_payload_size = 15 * 1024 * 1024;
+	let relay_ws_url =
+		env::var("RELAY_WS_URL").unwrap_or_else(|_| "wss://paseo.dotters.network".to_string());
+	let para_ws_url = env::var("PARA_WS_URL")
+		.unwrap_or_else(|_| "wss://gargantua.rpc.polytope.technology".to_string());
+
+	let (relay_client, relay_rpc_client) =
+		subxt_utils::client::ws_client::<PolkadotConfig>(&relay_ws_url, max_rpc_payload_size)
+			.await?;
+	let relay_rpc = LegacyRpcMethods::<PolkadotConfig>::new(relay_rpc_client.clone());
+	let (para_client, para_rpc_client) =
+		subxt_utils::client::ws_client::<Hyperbridge>(&para_ws_url, max_rpc_payload_size)
+			.await?;
+	let para_rpc = LegacyRpcMethods::<Hyperbridge>::new(para_rpc_client.clone());
+
+	let prover = Prover {
+		beefy_activation_block: 0,
+		relay: relay_client,
+		relay_rpc: relay_rpc.clone(),
+		relay_rpc_client: relay_rpc_client.clone(),
+		para: para_client,
+		para_rpc,
+		para_rpc_client,
+		para_ids: vec![4009],
+		query_batch_size: Some(100),
+	};
+
+	let latest_beefy_hash: H256 =
+		relay_rpc_client.request("beefy_getFinalizedHead", rpc_params!()).await?;
+	let previous_beefy_hash = previous_beefy_anchor(&relay_rpc, latest_beefy_hash).await?;
+	let initial_state =
+		prover.get_initial_consensus_state(Some(previous_beefy_hash.into())).await?;
+
+	let block = relay_rpc
+		.chain_get_block(Some(latest_beefy_hash.into()))
+		.await?
+		.ok_or_else(|| anyhow!("missing latest beefy block"))?;
+	let beefy_justification = block
+		.justifications
+		.ok_or_else(|| anyhow!("latest beefy block lacks justifications"))?
+		.into_iter()
+		.find_map(|j| (j.0 == sp_consensus_beefy::BEEFY_ENGINE_ID).then_some(j.1))
+		.ok_or_else(|| anyhow!("latest beefy block lacks beefy justification"))?;
+	let VersionedFinalityProof::V1(signed_commitment_raw) =
+		VersionedFinalityProof::<u32, Signature>::decode(&mut &*beefy_justification)?;
+
+	let (mmr_leaf_proof, latest_leaf) =
+		fetch_mmr_proof(&prover.relay_rpc, signed_commitment_raw.commitment.block_number, None)
+			.await?;
+
+	let signatures = signed_commitment_raw
+		.signatures
+		.iter()
+		.enumerate()
+		.filter_map(|(index, sig)| {
+			sig.as_ref().map(|s| {
+				let slice: &[u8] = s.as_ref();
+				let signature_array: [u8; 65] =
+					slice.try_into().expect("BEEFY signature is 65 bytes");
+				SignatureWithAuthorityIndex { index: index as u32, signature: signature_array }
+			})
+		})
+		.collect::<Vec<_>>();
+
+	let current_authorities = prover.beefy_authorities(Some(latest_beefy_hash)).await?;
+	let authority_address_hashes =
+		hash_authority_addresses(current_authorities.into_iter().map(|x| x.encode()).collect())?;
+	let authority_indices = signatures.iter().map(|x| x.index as usize).collect::<Vec<_>>();
+	let authority_tree = MerkleTree::<MerkleHasher>::from_leaves(&authority_address_hashes);
+	let authority_proof_hashes = authority_tree.proof(&authority_indices).proof_hashes().to_vec();
+
+	let signed_commitment =
+		BvpSignedCommitment { commitment: signed_commitment_raw.commitment.clone(), signatures };
+
+	let mmr = MmrProof {
+		signed_commitment,
+		latest_mmr_leaf: latest_leaf.clone(),
+		mmr_proof: mmr_leaf_proof,
+		authority_proof: authority_proof_hashes,
+	};
+
+	let parent_hash = H256::decode(&mut &*latest_leaf.parent_number_and_hash.1.encode())?;
+	let heads = paras_parachains(&prover.relay_rpc, Some(parent_hash.into())).await?;
+	let (parachains, indices): (Vec<_>, Vec<_>) = prover
+		.para_ids
+		.iter()
+		.map(|id| {
+			let index = heads
+				.iter()
+				.position(|(i, _)| *i == *id)
+				.unwrap_or_else(|| panic!("paraid {id} missing from relay heads"));
+			(
+				ParachainHeader {
+					header: heads[index].1.clone(),
+					index: index as u32,
+					para_id: heads[index].0,
+				},
+				index,
+			)
+		})
+		.unzip();
+	let leaves = heads.iter().map(|pair| keccak_256(&pair.encode())).collect::<Vec<_>>();
+	let parachain_tree = MerkleTree::<MerkleHasher>::from_leaves(&leaves);
+	let parachain_proof_hashes = parachain_tree.proof(&indices).proof_hashes().to_vec();
+	let parachain_proof = ParachainProof {
+		parachains,
+		proof: parachain_proof_hashes,
+		total_leaves: leaves.len() as u32,
+	};
+
+	Ok((initial_state, ConsensusMessage { mmr, parachain: parachain_proof }))
+}
+
+/// Tier-2 happy-path test. Builds a real naive BEEFY proof against live Paseo,
+/// initializes the trusted state on simnode, then submits the proof through the
+/// `submit_proof` extrinsic and asserts the dispatch succeeded. Requires the
+/// simnode to be running gargantua-paseo (paraid 4009) and outbound network
+/// access to the configured relay/parachain RPCs. Run with `--ignored`.
+#[tokio::test]
+#[ignore]
+async fn test_naive_proof_happy_path() -> Result<(), anyhow::Error> {
+	eprintln!("[stage] building live naive proof from paseo");
+	let (initial_state, consensus_message) = build_live_naive_proof().await?;
+	let initial_height = initial_state.latest_beefy_height;
+	let proof_block: u32 = consensus_message.mmr.signed_commitment.commitment.block_number;
+	eprintln!(
+		"[stage] proof built: trusted_height={initial_height} proof_block={proof_block} \
+		 paras={} sigs={}",
+		consensus_message.parachain.parachains.len(),
+		consensus_message.mmr.signed_commitment.signatures.len(),
+	);
+	assert!(
+		proof_block > initial_height,
+		"proof block {proof_block} must be ahead of trusted height {initial_height}",
+	);
+
+	let abi_state: SolBeefyConsensusState = initial_state.into();
+	let abi_state_bytes = SolBeefyConsensusState::abi_encode(&abi_state);
+
+	let abi_proof: SolBeefyConsensusProof = consensus_message.into();
+	let abi_proof_bytes = <SolBeefyConsensusProof as SolType>::abi_encode_params(&abi_proof);
+	let mut wire_proof = Vec::with_capacity(1 + abi_proof_bytes.len());
+	wire_proof.push(PROOF_TYPE_NAIVE);
+	wire_proof.extend_from_slice(&abi_proof_bytes);
+	eprintln!(
+		"[stage] abi-encoded: state={} bytes proof={} bytes",
+		abi_state_bytes.len(),
+		wire_proof.len(),
+	);
+
+	let port = env::var("PORT").unwrap_or_else(|_| "9990".into());
+	let url = format!("ws://127.0.0.1:{port}");
+	let (client, rpc_client) =
+		subxt_utils::client::ws_client::<Hyperbridge>(&url, u32::MAX).await?;
+
+	let init_call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"initialize_state",
+		vec![Value::from_bytes(&abi_state_bytes)],
+	);
+	eprintln!("[stage] submitting initialize_state via sudo");
+	submit_sudo(&client, &rpc_client, init_call).await?;
+	eprintln!("[stage] initialize_state finalized");
+
+	// Reset `ProofReward` to 0 so `pay_position_reward` short-circuits without trying
+	// to draw from the (unfunded) treasury account. The Tier-1 test in this module sets
+	// `ProofReward` to a non-zero value via Sudo; running both tests against the same
+	// simnode session would otherwise leave Tier-2 hitting `RewardTransferFailed`.
+	let zero_reward =
+		subxt::dynamic::tx("BeefyConsensusProofs", "set_proof_reward", vec![Value::u128(0)]);
+	submit_sudo(&client, &rpc_client, zero_reward).await?;
+
+	let submit_call = subxt::dynamic::tx(
+		"BeefyConsensusProofs",
+		"submit_proof",
+		vec![Value::from_bytes(&wire_proof)],
+	);
+	eprintln!("[stage] submitting submit_proof signed by Bob");
+	submit_signed(&client, &rpc_client, submit_call, Keyring::Bob).await?;
+	eprintln!("[stage] submit_proof finalized");
+
+	// First-proof path appends `latest_height` to `MessagingProofs`. A non-empty vec
+	// after `submit_proof` returns success means dispatch ran the full BEEFY check,
+	// stored a parachain commitment, and ran ring-buffer eviction. Combined with
+	// `wait_for_finalized_success` having returned ok, that's sufficient evidence
+	// the naive happy path works end-to-end.
+	let messaging_proofs: Vec<u64> =
+		fetch_storage::<Vec<u64>>(&client, "MessagingProofs").await?.unwrap_or_default();
+	assert!(
+		!messaging_proofs.is_empty(),
+		"MessagingProofs must contain the proven height after a successful first proof",
+	);
+
+	Ok(())
+}
+
+/// Tier-3 SP1 uncle dispatch path. Mirrors the bench in
+/// `modules/pallets/beefy-consensus-proofs/src/benchmarking.rs::submit_proof`: the live
+/// BEEFY consensus state is forced to `LIVE_STATE_SCALE` (whose `latest_beefy_height`
+/// equals the SP1 fixture proof's `block_number`), so dispatch hits the SP1 verifier's
+/// own `StaleHeight` short-circuit before any cryptographic work and the pallet maps
+/// that to `StaleProof`, routing the proof to `settle_uncle_proof`. `ProofContext` is
+/// pre-seeded with the older `TRUSTED_STATE_SCALE` snapshot so SP1 verification inside
+/// the uncle path actually succeeds.
+///
+/// Two sequential submissions cover the uncle outcomes the pallet exposes here:
+///
+/// 1. Bob (`SP1_WIRE_PROOF`): SP1 verifies, uncle accepted at position 0. `ProverCount`
+///    becomes 1 and the proof hash is recorded in `AcceptedProofHashes`.
+/// 2. Ferdie (`SP1_WIRE_PROOF`, the same bytes Bob already submitted): same
+///    `keccak256(proof)` hash as (1) → trips the `AcceptedProofHashes` dedup inside
+///    `settle_uncle_proof` and the dispatch fails with `ProofAlreadySubmitted`. State
+///    invariants must hold: `ProverCount` stays at 1 and `AcceptedProofHashes` retains
+///    exactly Bob's hash.
+///
+/// The multi-position fan-out (several distinct uncles at positions 1..n) is covered by
+/// the bench rather than here: generating each additional valid SP1 proof requires a
+/// separate run of `polytope-labs/sp1-beefy`, and trailing-byte malleability of a single
+/// fixture is rejected at the extrinsic boundary by `do_submit_proof`'s round-trip check.
+///
+/// Together these prove the uncle dispatch surface is wired up end-to-end on the live
+/// runtime and that dedup keeps rejecting resubmission of already-accepted proof bytes.
+/// No live network access is required — the SP1 fixture is static.
+#[tokio::test] +#[ignore] +async fn test_sp1_uncle_proof_dispatch_path() -> Result<(), anyhow::Error> { + eprintln!("[stage] sp1 uncle dispatch path"); + let port = env::var("PORT").unwrap_or_else(|_| "9990".into()); + let url = format!("ws://127.0.0.1:{port}"); + let (client, rpc_client) = + subxt_utils::client::ws_client::(&url, u32::MAX).await?; + + // 1. Switch the SP1 vkey to the fixture vkey so verification against the SP1 proof matches. + // Idempotent — no-op when Tier-1 left the same vkey in place. + let vkey_call = subxt::dynamic::tx( + "BeefyConsensusProofs", + "set_sp1_vkey_hash", + vec![Value::from_bytes(SP1_FIXTURE_VKEY)], + ); + submit_sudo(&client, &rpc_client, vkey_call).await?; + + // 2. `pay_position_reward` would otherwise try to draw from an unfunded treasury and blow up + // the uncle accept; mirror Tier-2's defensive reset. + let zero_reward = + subxt::dynamic::tx("BeefyConsensusProofs", "set_proof_reward", vec![Value::u128(0)]); + submit_sudo(&client, &rpc_client, zero_reward).await?; + + // 3. `settle_uncle_proof` looks up `ProofContext[Self::latest_height()]`. After a successful + // first proof, `settle_first_proof` pushes that same height into `MessagingProofs`, so its + // last entry is a faithful proxy. When Tier-3 runs in isolation `MessagingProofs` is empty + // and `latest_height()` is 0, matching what we'd seed. + let parachain_height: u64 = fetch_storage::>(&client, "MessagingProofs") + .await? + .and_then(|v| v.last().copied()) + .unwrap_or(0); + eprintln!("[stage] seeding ProofContext at parachain_height={parachain_height}"); + + // 4. Force the BEEFY consensus state and seed the uncle snapshot via `System::set_storage`. We + // override regardless of whether Tier-2 already ran `initialize_state` — the four ISMP keys + // cover the fresh-simnode case where `ConsensusStateClient` / `UnbondingPeriod` / + // `ConsensusClientUpdateTime` aren't populated yet, while `ConsensusStates` overrides + // whatever Tier-2 advanced state to. 
+	// The values mirror what
+	// `pallet_ismp::create_consensus_client` writes during the bench setup.
+	let now_secs = SystemTime::now()
+		.duration_since(UNIX_EPOCH)
+		.map_err(|e| anyhow!("system time: {e:?}"))?
+		.as_secs();
+	let kv_list: Vec<(Vec<u8>, Vec<u8>)> = vec![
+		// `Ismp::ConsensusStates` is `Twox64Concat, ConsensusClientId -> Vec<u8>`.
+		(
+			twox_64_concat_key(b"Ismp", b"ConsensusStates", &BEEFY_CONSENSUS_ID),
+			LIVE_STATE_SCALE.to_vec().encode(),
+		),
+		// `Ismp::ConsensusStateClient` is `Blake2_128Concat, ConsensusStateId ->
+		// ConsensusClientId`. ConsensusClientId is `[u8; 4]`.
+		(
+			blake2_128_concat_key(b"Ismp", b"ConsensusStateClient", &BEEFY_CONSENSUS_ID),
+			BEEFY_CONSENSUS_ID.encode(),
+		),
+		// `Ismp::UnbondingPeriod` is `Blake2_128Concat, ConsensusStateId -> u64`. One
+		// year is comfortably above the fixture timestamp window.
+		(
+			blake2_128_concat_key(b"Ismp", b"UnbondingPeriod", &BEEFY_CONSENSUS_ID),
+			(60u64 * 60 * 24 * 365).encode(),
+		),
+		// `Ismp::ConsensusClientUpdateTime` is `Twox64Concat, ConsensusClientId -> u64`.
+		(
+			twox_64_concat_key(b"Ismp", b"ConsensusClientUpdateTime", &BEEFY_CONSENSUS_ID),
+			now_secs.encode(),
+		),
+		// `BeefyConsensusProofs::ProofContext` is `Blake2_128Concat, u64 -> Vec<u8>`.
+		(
+			blake2_128_concat_key(
+				b"BeefyConsensusProofs",
+				b"ProofContext",
+				&parachain_height.encode(),
+			),
+			TRUSTED_STATE_SCALE.to_vec().encode(),
+		),
+	];
+	let set_storage_call =
+		subxt::dynamic::tx("System", "set_storage", vec![storage_kv_list_to_value(&kv_list)]);
+	submit_sudo(&client, &rpc_client, set_storage_call).await?;
+	eprintln!("[stage] consensus + uncle snapshot seeded");
+
+	// Bob lands the only valid SP1 fixture; Ferdie resubmits identical bytes to exercise
+	// dedup. We can't cook multiple distinct uncles cheaply (each needs its own SP1
+	// Groth16 proof from `polytope-labs/sp1-beefy`), so the multi-position fan-out is
+	// covered by the bench instead.
+	// Trailing-byte malleability is now rejected at the
+	// extrinsic boundary by `do_submit_proof`'s round-trip check.
+	let bob_proof = SP1_WIRE_PROOF.to_vec();
+	let ferdie_proof = bob_proof.clone();
+	let proof_context_key =
+		blake2_128_concat_key(b"BeefyConsensusProofs", b"ProofContext", &parachain_height.encode());
+	let prover_count_key =
+		blake2_128_concat_key(b"BeefyConsensusProofs", b"ProverCount", &parachain_height.encode());
+	let accepted_hashes_key = blake2_128_concat_key(
+		b"BeefyConsensusProofs",
+		b"AcceptedProofHashes",
+		&parachain_height.encode(),
+	);
+
+	let bob_hash: H256 = keccak_256(&bob_proof).into();
+
+	// 5. Bob: WIRE_PROOF as-is. Position 0.
+	eprintln!("[stage] submit (Bob) — expect uncle accept at position 0");
+	submit_signed(
+		&client,
+		&rpc_client,
+		subxt::dynamic::tx(
+			"BeefyConsensusProofs",
+			"submit_proof",
+			vec![Value::from_bytes(&bob_proof)],
+		),
+		Keyring::Bob,
+	)
+	.await?;
+	let count: u32 = fetch_storage_by_key::<u32>(&client, &prover_count_key).await?.unwrap_or(0);
+	let hashes: Vec<H256> = fetch_storage_by_key::<Vec<H256>>(&client, &accepted_hashes_key)
+		.await?
+		.unwrap_or_default();
+	let ctx: Option<Vec<u8>> = fetch_storage_by_key::<Vec<u8>>(&client, &proof_context_key).await?;
+	assert_eq!(count, 1, "Bob's uncle should set ProverCount to 1");
+	assert_eq!(hashes, vec![bob_hash], "AcceptedProofHashes should record Bob's hash");
+	assert!(ctx.is_some(), "ProofContext snapshot must persist across uncle accepts");
+
+	// 6. Ferdie: WIRE_PROOF (same bytes as Bob). Same hash → `AcceptedProofHashes` dedup fires
+	// inside `settle_uncle_proof`, dispatch errors with `ProofAlreadySubmitted`.
+	eprintln!(
+		"[stage] submit (Ferdie) — expect ProofAlreadySubmitted (Bob's hash already recorded)"
+	);
+	let ferdie_result = submit_signed(
+		&client,
+		&rpc_client,
+		subxt::dynamic::tx(
+			"BeefyConsensusProofs",
+			"submit_proof",
+			vec![Value::from_bytes(&ferdie_proof)],
+		),
+		Keyring::Ferdie,
+	)
+	.await;
+	assert!(
+		ferdie_result.is_err(),
+		"duplicate uncle submission must be rejected by AcceptedProofHashes dedup",
+	);
+
+	// State invariants across the failed dispatch — dedup short-circuits before
+	// `ProverCount` is bumped or another hash is appended.
+	let count: u32 = fetch_storage_by_key::<u32>(&client, &prover_count_key).await?.unwrap_or(0);
+	let hashes: Vec<H256> = fetch_storage_by_key::<Vec<H256>>(&client, &accepted_hashes_key)
+		.await?
+		.unwrap_or_default();
+	assert_eq!(count, 1, "rejected duplicate must not bump ProverCount past 1");
+	assert_eq!(hashes, vec![bob_hash], "rejected duplicate must not mutate AcceptedProofHashes",);
+
+	Ok(())
+}
diff --git a/tesseract/consensus/beefy/src/backend/onchain.rs b/tesseract/consensus/beefy/src/backend/onchain.rs
index c6cd4ea58..569e9e65d 100644
--- a/tesseract/consensus/beefy/src/backend/onchain.rs
+++ b/tesseract/consensus/beefy/src/backend/onchain.rs
@@ -16,39 +16,36 @@
 //!
 //! Instead of queuing proofs for a separate host process, this backend submits proofs
 //! directly to the `pallet-beefy-consensus-proofs` on the hyperbridge parachain via
-//! unsigned extrinsics.
+//! signed extrinsics. The signer is the reward payee.
 use super::{ConsensusProof, ProofBackend, QueueMessage, StreamMessage};
 use alloy_sol_types::SolType;
 use anyhow::anyhow;
 use beefy_verifier_primitives::ConsensusState;
-use codec::{Decode, Encode};
+use codec::Decode;
 use futures::Stream;
-use ismp::{
-	consensus::{ConsensusStateId, StateMachineId},
-	host::StateMachine,
-};
-use pallet_beefy_consensus_proofs::types::SIGNATURE_DOMAIN;
+use ismp::{consensus::StateMachineId, host::StateMachine};
 use polkadot_sdk::*;
-use sp_core::{sr25519, Pair};
+use sp_core::sr25519;
 use sp_runtime::{generic::Header, traits::Header as _};
 use std::{pin::Pin, sync::Arc};
 use subxt::{
+	config::ExtrinsicParams,
 	dynamic::Value,
 	ext::subxt_rpcs::{rpc_params, RpcClient},
-	utils::MultiSignature,
+	tx::DefaultParams,
+	utils::{AccountId32, MultiSignature},
 	OnlineClient,
 };
-use tesseract_substrate::extrinsic::send_unsigned_extrinsic;
+use subxt_utils::{send_extrinsic, InMemorySigner};
 use tokio::sync::RwLock;
 
 /// Proof backend that submits proofs directly to `pallet-beefy-consensus-proofs`
 /// on the hyperbridge parachain.
 ///
 /// When `send_mandatory_proof` or `send_messages_proof` is called, this backend
-/// constructs a `SubmitProofPayload`, signs it with SR25519, and submits the
-/// unsigned extrinsic directly to the chain. The pallet handles verification,
-/// consensus state updates, and reward distribution.
+/// signs and submits a `BeefyConsensusProofs::submit_proof` extrinsic carrying
+/// the raw proof bytes. The signer is the reward payee.
 ///
 /// The host-side methods (`receive_*`, `queue_notifications`, `delete_message`)
 /// are no-ops since the pallet processes proofs inline — there is no intermediate
@@ -81,35 +78,22 @@ impl<P> OnchainBackend<P>
 where
 	P: subxt::Config + Send + Sync,
 	P::Signature: From<MultiSignature> + Send + Sync,
+	P::AccountId: From<AccountId32> + Into<AccountId32> + Clone + 'static + Send + Sync,
+	<P::ExtrinsicParams as ExtrinsicParams<P>>::Params: Send + Sync + DefaultParams,
 {
-	/// Submit a consensus proof to `pallet-beefy-consensus-proofs::submit_proof`.
+	/// Submit a consensus proof to `pallet-beefy-consensus-proofs::submit_proof` as a
+	/// signed extrinsic. The signer becomes the reward payee.
 	async fn submit_to_pallet(&self, proof: &ConsensusProof) -> Result<(), anyhow::Error> {
 		let proof_bytes = proof.message.consensus_proof.clone();
-		let submitter_bytes: [u8; 32] = self.signer.public().0;
-
-		// Sign: keccak256((SIGNATURE_DOMAIN, submitter, keccak256(proof)).encode())
-		let proof_digest = sp_core::hashing::keccak_256(&proof_bytes);
-		let msg_preimage = (SIGNATURE_DOMAIN, submitter_bytes, proof_digest).encode();
-		let signed_msg = sp_core::hashing::keccak_256(&msg_preimage);
-		let signature = self.signer.sign(&signed_msg);
 		// Construct the dynamic extrinsic
-		let payload_value = Value::named_composite([
-			("submitter", Value::from_bytes(submitter_bytes)),
-			("proof", Value::from_bytes(proof_bytes.clone())),
-		]);
-		let signature_value = Value::from_bytes(signature.0);
-
-		let tx = subxt::dynamic::tx(
-			"BeefyConsensusProofs",
-			"submit_proof",
-			vec![payload_value, signature_value],
-		);
+		let payload_value = Value::from_bytes(proof_bytes.clone());
+		let tx = subxt::dynamic::tx("BeefyConsensusProofs", "submit_proof", vec![payload_value]);
 
-		let result = send_unsigned_extrinsic(&self.client, tx, false).await;
+		let signer = InMemorySigner::<P>::new(self.signer.clone());
+		let result = send_extrinsic(&self.client, &signer, &tx, None).await;
 
 		// Wait one block so that load_state() on the next iteration sees the
-		// updated LastProvenHeight written by the pallet in the previous block.
+		// state advance written by the pallet in the previous block.
 		let mut blocks = self.client.blocks().subscribe_best().await?;
 		let _ = blocks.next().await;
@@ -151,6 +135,8 @@ impl<P> ProofBackend for OnchainBackend<P>
 where
 	P: subxt::Config + Send + Sync,
 	P::Signature: From<MultiSignature> + Send + Sync,
+	P::AccountId: From<AccountId32> + Into<AccountId32> + Clone + 'static + Send + Sync,
+	<P::ExtrinsicParams as ExtrinsicParams<P>>::Params: Send + Sync + DefaultParams,
 {
 	async fn init_queues(&self, _state_machines: &[StateMachine]) -> Result<(), anyhow::Error> {
 		// No queues needed — proofs are submitted directly to the pallet.