diff --git a/Cargo.toml b/Cargo.toml index 185a399ba..222f5e5f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "api", + "zks_get_proof_verifier", "basic_bootloader", "basic_system", "callable_oracles", @@ -105,4 +106,3 @@ debug = true # prover_examples = { path = "../zksync-airbender/circuit_defs/prover_examples" } # risc_v_simulator = { path = "../zksync-airbender/risc_v_simulator" } # execution_utils = { path = "../zksync-airbender/execution_utils" } - diff --git a/api/Cargo.toml b/api/Cargo.toml index 0c285c3b8..5793cc76a 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -18,13 +18,14 @@ zksync_os_runner = { path = "../zksync_os_runner"} risc_v_simulator = { workspace = true } zk_ee = { path = "../zk_ee", default-features = false } crypto = { path = "../crypto", default-features = false } -basic_system = { path = "../basic_system", default-features = false } +basic_system = { path = "../basic_system", default-features = false, features = ["get-proof"] } basic_bootloader = { path = "../basic_bootloader", default-features = false } evm_interpreter = { path = "../evm_interpreter", default-features = false } ruint = { workspace = true, default-features = false } alloy = { version = "=1", default-features = false, features = ["eip712", "consensus", "rpc-types", "signer-local", "dyn-abi", "network"] } zksync_os_interface = { workspace = true } alloy-sol-types = "1" +zks_get_proof_verifier = { path = "../zks_get_proof_verifier", default-features = false, features = ["serde"] } [features] diff --git a/api/src/get_proof.rs b/api/src/get_proof.rs new file mode 100644 index 000000000..4c2426ece --- /dev/null +++ b/api/src/get_proof.rs @@ -0,0 +1,56 @@ +//! Re-exports and convenience helpers for `zks_getProof` verification. 
+ +use crypto::MiniDigest; + +pub use zks_get_proof_verifier::{ + LeafWithProof, StateCommitmentPreimage, StorageProof, StorageProofType, ZksGetProofHasher, + ZksGetProofResponse, ZksGetProofVerificationError, MAX_32_BYTES, ZERO_32_BYTES, +}; + +pub use zks_get_proof_verifier::compute_state_commitment as compute_state_commitment_with_hasher; +pub use zks_get_proof_verifier::verify_response as verify_response_with_hasher; + +#[derive(Clone, Debug)] +pub struct Blake2sGetProofHasher { + hasher: crypto::blake2s::Blake2s256, +} + +impl Blake2sGetProofHasher { + pub fn new() -> Self { + Self { + hasher: crypto::blake2s::Blake2s256::new(), + } + } +} + +impl Default for Blake2sGetProofHasher { + fn default() -> Self { + Self::new() + } +} + +impl ZksGetProofHasher for Blake2sGetProofHasher { + fn update(&mut self, input: impl AsRef<[u8]>) { + self.hasher.update(input); + } + + fn finalize_reset(&mut self) -> [u8; 32] { + self.hasher.finalize_reset() + } +} + +pub fn compute_state_commitment( + state_root: &[u8; 32], + preimage: &StateCommitmentPreimage, +) -> [u8; 32] { + let mut hasher = Blake2sGetProofHasher::new(); + zks_get_proof_verifier::compute_state_commitment(&mut hasher, state_root, preimage) +} + +pub fn verify_response( + response: &ZksGetProofResponse, + expected_batch_hash: &[u8; 32], +) -> Result<Vec<[u8; 32]>, ZksGetProofVerificationError> { + let mut hasher = Blake2sGetProofHasher::new(); + zks_get_proof_verifier::verify_response::<Blake2sGetProofHasher>(response, expected_batch_hash, &mut hasher) +} diff --git a/api/src/lib.rs b/api/src/lib.rs index 913ebd84c..3a4b95ca4 100644 --- a/api/src/lib.rs +++ b/api/src/lib.rs @@ -8,6 +8,7 @@ use forward_system::run::{ }; use oracle_provider::ReadWitnessSource; use zksync_os_interface::traits::TxListSource; +pub mod get_proof; pub mod helpers; /// Runs the batch, and returns the output (that contains gas usage, transaction status etc.). 
diff --git a/basic_bootloader/src/bootloader/block_flow/zk/post_tx_op/public_input.rs b/basic_bootloader/src/bootloader/block_flow/zk/post_tx_op/public_input.rs index 937802902..2bd2b9514 100644 --- a/basic_bootloader/src/bootloader/block_flow/zk/post_tx_op/public_input.rs +++ b/basic_bootloader/src/bootloader/block_flow/zk/post_tx_op/public_input.rs @@ -2,45 +2,9 @@ use crypto::sha3::Keccak256; use crypto::MiniDigest; use ruint::aliases::U256; use zk_ee::common_structs::da_commitment_scheme::DACommitmentScheme; +pub use zk_ee::common_structs::ChainStateCommitment; use zk_ee::utils::Bytes32; -/// -/// Commitment to state that we need to keep between blocks execution: -/// - state commitment(`state_root` and `next_free_slot`) -/// - block number -/// - last 256 block hashes, previous can be "unrolled" from the last, but we commit to 256 for optimization. -/// - last block timestamp, to ensure that block timestamps are not decreasing. -/// -/// This commitment(hash of its fields) will be saved on the settlement layer. -/// With proofs, we'll ensure that the values used during block execution correspond to this commitment. -/// -#[derive(Debug)] -pub struct ChainStateCommitment { - pub state_root: Bytes32, - pub next_free_slot: u64, - pub block_number: u64, - pub last_256_block_hashes_blake: Bytes32, - pub last_block_timestamp: u64, -} - -impl ChainStateCommitment { - /// - /// Calculate blake2s hash of chain state commitment. - /// - /// We are using proving friendly blake2s because this commitment will be generated and opened during proving, - /// but we don't need to open it on the settlement layer. 
- /// - pub fn hash(&self) -> [u8; 32] { - let mut hasher = crypto::blake2s::Blake2s256::new(); - hasher.update(self.state_root.as_u8_ref()); - hasher.update(&self.next_free_slot.to_be_bytes()); - hasher.update(&self.block_number.to_be_bytes()); - hasher.update(self.last_256_block_hashes_blake.as_u8_ref()); - hasher.update(&self.last_block_timestamp.to_be_bytes()); - hasher.finalize() - } -} - /// /// Except for proving existence of batch(of blocks) that changes state from one to another, we want to open some info about this batch on the settlement layer: /// - pubdata: to make sure that it's published and state is recoverable diff --git a/basic_system/Cargo.toml b/basic_system/Cargo.toml index 3e408d948..7c63709c4 100644 --- a/basic_system/Cargo.toml +++ b/basic_system/Cargo.toml @@ -24,6 +24,7 @@ num-bigint = { version = "0.4", optional = true} num-traits = { version = "*", optional = true} system_hooks = { path = "../system_hooks", default-features = false } cc-traits = { path = "src/system_implementation/ethereum_storage_model/supporting_crates/cc-traits" } +zks_get_proof_verifier = { path = "../zks_get_proof_verifier", default-features = false, optional = true } cfg-if = "1.0.0" const_for = "0.1.5" @@ -35,7 +36,9 @@ paste = "1.0.15" zerocopy = { workspace = true } [features] -testing = ["zk_ee/testing", "evm_interpreter/testing", "serde", "ruint/serde", "rand", "crypto/testing", "num-bigint", "num-traits"] +serde = ["dep:serde"] +get-proof = ["dep:zks_get_proof_verifier"] +testing = ["zk_ee/testing", "evm_interpreter/testing", "serde", "ruint/serde", "rand", "crypto/testing", "num-bigint", "num-traits", "get-proof", "zks_get_proof_verifier/serde"] default = ["testing"] cycle_marker = ["cycle_marker/log_to_file"] proving = ["crypto/proving"] diff --git a/basic_system/src/system_implementation/flat_storage_model/get_proof.rs b/basic_system/src/system_implementation/flat_storage_model/get_proof.rs new file mode 100644 index 000000000..38c0d251a --- /dev/null +++ 
b/basic_system/src/system_implementation/flat_storage_model/get_proof.rs @@ -0,0 +1,282 @@ +//! Helpers for the `zks_getProof` API. +//! +//! This module keeps prover/testing integration for `FlatStorageBacking`. +//! Shared `zks_getProof` types and verification logic live in the standalone +//! `zks_get_proof_verifier` crate. + +use zk_ee::utils::Bytes32 as StorageBytes32; + +pub use zks_get_proof_verifier::{ + compute_state_commitment, verify_response, LeafWithProof, StateCommitmentPreimage, + StorageProof, StorageProofType, ZksGetProofHasher, ZksGetProofResponse, + ZksGetProofVerificationError, ZERO_32_BYTES, +}; + +#[inline(always)] +fn to_proof_bytes32(value: StorageBytes32) -> [u8; 32] { + value.as_u8_array() +} + +#[cfg(test)] +#[inline(always)] +fn from_proof_bytes32(value: [u8; 32]) -> StorageBytes32 { + StorageBytes32::from_array(value) +} + +/// Prover helpers (testing only). +pub mod prover { + use alloc::alloc::Global; + use alloc::vec::Vec; + use core::alloc::Allocator; + + use crate::system_implementation::flat_storage_model::simple_growable_storage::FlatStorageHasher; + use crypto::MiniDigest; + use ruint::aliases::B160; + use zk_ee::common_structs::derive_flat_storage_key_with_hasher; + use zk_ee::utils::Bytes32 as StorageBytes32; + + use super::super::{compute_empty_hashes, Blake2sStorageHasher, FlatStorageBacking}; + use super::{ + to_proof_bytes32, LeafWithProof, StateCommitmentPreimage, StorageProof, StorageProofType, + ZksGetProofResponse, + }; + + fn compress_siblings( + path: &[StorageBytes32; N], + empty_hashes: &[StorageBytes32; N], + ) -> Vec<[u8; 32]> { + let mut last_non_empty: Option = None; + for i in (0..N).rev() { + if path[i] != empty_hashes[i] { + last_non_empty = Some(i); + break; + } + } + let len = last_non_empty.map(|idx| idx + 1).unwrap_or(0); + path[..len] + .iter() + .copied() + .map(to_proof_bytes32) + .collect::>() + } + + fn leaf_with_proof( + proof: &super::super::LeafProof, + empty_hashes: &[StorageBytes32; N], + ) -> 
LeafWithProof { + let path: &[StorageBytes32; N] = &proof.path; + LeafWithProof { + index: proof.index, + leaf_key: to_proof_bytes32(proof.leaf.key), + value: to_proof_bytes32(proof.leaf.value), + next_index: proof.leaf.next, + siblings: compress_siblings(path, empty_hashes), + } + } + + impl + FlatStorageBacking + { + /// Builds the commitment preimage from the current tree and block metadata. + pub fn state_commitment_preimage( + &self, + block_number: u64, + last256_block_hashes_blake: StorageBytes32, + last_block_timestamp: u64, + ) -> StateCommitmentPreimage { + StateCommitmentPreimage { + next_free_slot: self.next_free_slot, + block_number, + last256_block_hashes_blake: to_proof_bytes32(last256_block_hashes_blake), + last_block_timestamp, + } + } + + /// Produces a `zks_getProof`-style response for the requested keys. + pub fn prove_zks_get_proof( + &self, + address: B160, + keys: &[StorageBytes32], + preimage: StateCommitmentPreimage, + ) -> ZksGetProofResponse { + let mut empty_hasher = Blake2sStorageHasher::new(); + let empty_hashes = + compute_empty_hashes::(&mut empty_hasher, Global); + let mut key_hasher = crypto::blake2s::Blake2s256::new(); + + let mut storage_proofs = Vec::with_capacity(keys.len()); + for key in keys { + let flat_key = derive_flat_storage_key_with_hasher(&address, key, &mut key_hasher); + let proof = match self.get(&flat_key) { + super::super::ReadValueWithProof::Existing { proof } => { + let path: &[StorageBytes32; N] = &proof.existing.path; + StorageProofType::Existing { + index: proof.existing.index, + value: to_proof_bytes32(proof.existing.leaf.value), + next_index: proof.existing.leaf.next, + siblings: compress_siblings(path, &empty_hashes), + } + } + super::super::ReadValueWithProof::New { proof, .. 
} => { + let left_neighbor = leaf_with_proof(&proof.previous, &empty_hashes); + let right_neighbor = leaf_with_proof(&proof.next, &empty_hashes); + StorageProofType::NonExisting { + left_neighbor, + right_neighbor, + } + } + }; + + storage_proofs.push(StorageProof { + key: to_proof_bytes32(*key), + proof, + }); + } + + ZksGetProofResponse { + address, + state_commitment_preimage: preimage, + storage_proofs, + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::alloc::Global; + use alloc::collections::BTreeMap; + use alloc::vec::Vec; + use crypto::MiniDigest; + use rand::rngs::StdRng; + use rand::{RngCore, SeedableRng}; + use ruint::aliases::B160; + use zk_ee::common_structs::derive_flat_storage_key; + + use super::super::{TestingTree, TREE_HEIGHT}; + + #[derive(Clone, Debug)] + struct Blake2sGetProofHasher { + hasher: crypto::blake2s::Blake2s256, + } + + impl Blake2sGetProofHasher { + fn new() -> Self { + Self { + hasher: crypto::blake2s::Blake2s256::new(), + } + } + } + + impl ZksGetProofHasher for Blake2sGetProofHasher { + fn update(&mut self, input: impl AsRef<[u8]>) { + self.hasher.update(input); + } + + fn finalize_reset(&mut self) -> [u8; 32] { + self.hasher.finalize_reset() + } + } + + fn random_bytes32(rng: &mut impl RngCore) -> StorageBytes32 { + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes); + StorageBytes32::from_array(bytes) + } + + fn random_address(rng: &mut impl RngCore) -> B160 { + let mut bytes = [0u8; 20]; + rng.fill_bytes(&mut bytes); + B160::from_be_bytes(bytes) + } + + fn build_response() -> (ZksGetProofResponse, [u8; 32], [u8; 32]) { + let mut rng = StdRng::seed_from_u64(0x5eedd00d); + let address = random_address(&mut rng); + + let mut leaves_map: BTreeMap = BTreeMap::new(); + let mut existing_keys = Vec::new_in(Global); + while existing_keys.len() < 4 { + let key = random_bytes32(&mut rng); + let value = random_bytes32(&mut rng); + let flat_key = derive_flat_storage_key(&address, &key); + if 
leaves_map.insert(flat_key, value).is_none() { + existing_keys.push(key); + } + } + + // Add some extra random accounts/slots to the tree. + for _ in 0..6 { + let other_address = random_address(&mut rng); + for _ in 0..2 { + let key = random_bytes32(&mut rng); + let value = random_bytes32(&mut rng); + let flat_key = derive_flat_storage_key(&other_address, &key); + leaves_map.entry(flat_key).or_insert(value); + } + } + + let mut leaves = Vec::new_in(Global); + for (key, value) in leaves_map.iter() { + leaves.push((*key, *value)); + } + + let tree = TestingTree::::new_in_with_leaves(Global, leaves); + + let mut missing_key = random_bytes32(&mut rng); + let mut missing_flat_key = derive_flat_storage_key(&address, &missing_key); + while leaves_map.contains_key(&missing_flat_key) { + missing_key = random_bytes32(&mut rng); + missing_flat_key = derive_flat_storage_key(&address, &missing_key); + } + + let keys_to_prove = vec![existing_keys[0], missing_key]; + let preimage = tree.state_commitment_preimage(42, random_bytes32(&mut rng), 1_700_000_000); + let response = tree.prove_zks_get_proof(address, &keys_to_prove, preimage); + + let mut verifier_hasher = Blake2sGetProofHasher::new(); + let batch_hash = compute_state_commitment( + &mut verifier_hasher, + &to_proof_bytes32(*tree.root()), + &response.state_commitment_preimage, + ); + + let existing_flat_key = derive_flat_storage_key(&address, &existing_keys[0]); + let expected_existing_value = + to_proof_bytes32(leaves_map.get(&existing_flat_key).copied().unwrap()); + + (response, batch_hash, expected_existing_value) + } + + #[test] + fn zks_get_proof_roundtrip_deterministic_positions() { + let (response, batch_hash, expected_existing_value) = build_response::(); + let mut verifier_hasher = Blake2sGetProofHasher::new(); + let values = response + .verify_with::(&batch_hash, &mut verifier_hasher) + .expect("proof must verify"); + assert_eq!(values[0], expected_existing_value); + assert_eq!(values[1], ZERO_32_BYTES); + } + + 
#[test] + fn zks_get_proof_roundtrip_random_positions() { + let (response, batch_hash, expected_existing_value) = build_response::(); + let mut verifier_hasher = Blake2sGetProofHasher::new(); + let values = response + .verify_with::(&batch_hash, &mut verifier_hasher) + .expect("proof must verify"); + assert_eq!(values[0], expected_existing_value); + assert_eq!(values[1], ZERO_32_BYTES); + } + + #[test] + fn zks_get_proof_json_roundtrip() { + let (response, _batch_hash, _expected_existing_value) = build_response::(); + let encoded = serde_json::to_vec(&response).expect("response json serialization failed"); + let decoded: ZksGetProofResponse = + serde_json::from_slice(&encoded).expect("response json deserialization failed"); + assert!(response == decoded); + } +} diff --git a/basic_system/src/system_implementation/flat_storage_model/mod.rs b/basic_system/src/system_implementation/flat_storage_model/mod.rs index b0e6a7541..aacab7b25 100644 --- a/basic_system/src/system_implementation/flat_storage_model/mod.rs +++ b/basic_system/src/system_implementation/flat_storage_model/mod.rs @@ -7,12 +7,16 @@ pub mod account_cache; mod account_cache_entry; pub mod cost_constants; +#[cfg(any(feature = "get-proof", test))] +pub mod get_proof; pub mod preimage_cache; mod simple_growable_storage; pub mod storage_cache; pub use self::account_cache::*; pub use self::account_cache_entry::*; +#[cfg(any(feature = "get-proof", test))] +pub use self::get_proof::*; pub use self::preimage_cache::*; pub use self::simple_growable_storage::*; pub use self::storage_cache::*; diff --git a/basic_system/src/system_implementation/flat_storage_model/simple_growable_storage.rs b/basic_system/src/system_implementation/flat_storage_model/simple_growable_storage.rs index 0d5cf905a..5b82dd292 100644 --- a/basic_system/src/system_implementation/flat_storage_model/simple_growable_storage.rs +++ b/basic_system/src/system_implementation/flat_storage_model/simple_growable_storage.rs @@ -1310,18 +1310,25 @@ pub 
fn recompute_root_from_proof, ) -> Bytes32 { - let leaf_hash = hasher.hash_leaf(&proof.leaf); + recompute_root_from_leaf_and_path(hasher, proof.index, &proof.leaf, &proof.path) +} + +pub fn recompute_root_from_leaf_and_path( + hasher: &mut H, + index: u64, + leaf: &FlatStorageLeaf, + path: &[Bytes32; N], +) -> Bytes32 { + let leaf_hash = hasher.hash_leaf(leaf); let mut current = leaf_hash; - let mut index = proof.index; - let path_ref: &[Bytes32] = &*proof.path; - for path in path_ref.iter() { - let path: &Bytes32 = path; + let mut index = index; + for sibling in path { let (left, right) = if index & 1 == 0 { // current is left - (¤t, path) + (¤t, sibling) } else { - (path, ¤t) + (sibling, ¤t) }; let next = hasher.hash_node(left, right); current = next; diff --git a/docs/api/zks_getProof.md b/docs/api/zks_getProof.md new file mode 100644 index 000000000..cc97480e4 --- /dev/null +++ b/docs/api/zks_getProof.md @@ -0,0 +1,216 @@ +# `zks_getProof` + +Returns a Merkle proof for a given account storage slot, verifiable against the L1 batch commitment. + +## Parameters + +| # | Name | Type | Description | +|---|------|------|-------------| +| 1 | `address` | `Address` | The account address. | +| 2 | `keys` | `H256[]` | Array of storage keys to prove. | +| 3 | `l1BatchNumber` | `uint64` | The L1 batch number against which the proof should be generated. The proof is for the state **after** this batch. | + +## Response + +```json +{ + "address": "0x...", + "stateCommitmentPreimage": { + "nextFreeSlot": "0x...", + "blockNumber": "0x...", + "last256BlockHashesBlake": "0x...", + "lastBlockTimestamp": "0x..." + }, + "storageProofs": [ + { + "key": "0x...", + "proof": { ... } + } + ] +} +``` + +### `address` + +The account address, as provided in the request. Included in the response so the verifier can derive the flat storage key (`blake2s(address_padded32_be || key)`) without external context. 
+ +### `stateCommitmentPreimage` + +The preimage fields needed to recompute the L1 state commitment from the Merkle root. These are constant per batch and shared across all storage proofs in the response. + +| Field | Type | Description | +|-------|------|-------------| +| `nextFreeSlot` | `uint64` | The next available leaf index in the state tree after this batch. Part of the tree commitment. | +| `blockNumber` | `uint64` | The last L2 block number in this batch. | +| `last256BlockHashesBlake` | `H256` | `blake2s` of the concatenation of the last 256 block hashes (each as 32-byte big-endian). | +| `lastBlockTimestamp` | `uint64` | Timestamp of the last L2 block in this batch. | + +### `storageProofs[i]` + +Each entry corresponds to one requested storage slot. + +| Field | Type | Description | +|-------|------|-------------| +| `key` | `H256` | The storage slot (as provided in the input). The verifier derives the tree key as `blake2s(address_padded32_be || key)`. | +| `proof` | `object` | The proof object. The `type` field discriminates between existing and non-existing proofs (see below). | + +The `proof` object always contains a `type` field: + +- `"existing"` — the slot exists in the tree. Additional fields: `index`, `value`, `nextIndex`, `siblings`. +- `"nonExisting"` — the slot has never been written to (value is implicitly zero). Additional fields: `leftNeighbor`, `rightNeighbor`. + +#### `proof` when `type` = `"existing"` + +Returned when the storage slot has been written to at least once. + +| Field | Type | Description | +|-------|------|-------------| +| `type` | `string` | `"existing"` | +| `index` | `uint64` | The leaf index in the tree. | +| `value` | `H256` | The storage value. | +| `nextIndex` | `uint64` | The linked-list pointer to the next leaf (by key order). | +| `siblings` | `H256[]` | The Merkle path (see [Siblings](#siblings) below). 
| + +The leaf key used in the tree is not included explicitly — the verifier derives it as `blake2s(address_padded32_be || key)` from the `address` and `key` fields in the response. + +#### `proof` when `type` = `"nonExisting"` + +Returned when the storage slot has never been written to (value is implicitly zero). Proves non-membership by showing two consecutive leaves in the key-sorted linked list that bracket the queried key. + +| Field | Type | Description | +|-------|------|-------------| +| `type` | `string` | `"nonExisting"` | +| `leftNeighbor` | `LeafWithProof` | The leaf with the largest key smaller than the queried key. | +| `rightNeighbor` | `LeafWithProof` | The leaf with the smallest key larger than the queried key. `leftNeighbor.nextIndex` must equal `rightNeighbor.index`. | + +#### `LeafWithProof` + +Used within non-existing proofs to represent a neighbor leaf and its Merkle path. + +| Field | Type | Description | +|-------|------|-------------| +| `index` | `uint64` | The leaf index in the tree. | +| `leafKey` | `H256` | The leaf's key (the `blake2s` derived flat storage key). | +| `value` | `H256` | The leaf's value. | +| `nextIndex` | `uint64` | The linked-list pointer to the next leaf. | +| `siblings` | `H256[]` | The Merkle path (see [Siblings](#siblings) below). | + +## Tree Structure + +The state tree is a fixed-depth (64) binary Merkle tree using Blake2s-256 as the hash function. Leaves are allocated left-to-right by insertion order and linked together in a sorted linked list by key. + +### Key derivation + +The flat storage key for a slot is derived as: + +``` +flat_key = blake2s(address_padded32_be || storage_key) +``` + +where `address_padded32_be` is the 20-byte address zero-padded on the left to 32 bytes. + +### Leaf hashing + +``` +leaf_hash = blake2s(key || value || next_index_le8) +``` + +where `key` and `value` are 32 bytes each, and `next_index_le8` is the `next` pointer encoded as 8 bytes little-endian. 
+ +An empty (unoccupied) leaf has `key = 0`, `value = 0`, `next = 0`. + +### Node hashing + +``` +node_hash = blake2s(left_child_hash || right_child_hash) +``` + +### Siblings + +The `siblings` array is an ordered list of sibling hashes forming the Merkle path from leaf to root. + +**Order.** `siblings[0]` is the sibling at the leaf level (depth 64). Subsequent entries move toward the root. A full (uncompressed) path has 64 entries, with the last entry being the sibling at depth 1 (one level below the root). At each level, if the current index is even the node is a left child; if odd it is a right child. The index is halved (integer division) after each level. + +**Empty subtree compression.** The tree has depth 64 but is sparsely populated — most subtrees are entirely empty. The hash of an empty subtree at each level is deterministic: + +``` +emptyHash[0] = blake2s(0x00{32} || 0x00{32} || 0x00{8}) // empty leaf hash (72 zero bytes) +emptyHash[i] = blake2s(emptyHash[i-1] || emptyHash[i-1]) // for i = 1..63 +``` + +If trailing siblings (toward the root) are equal to the corresponding `emptyHash` for that level, they are omitted. The verifier reconstructs them: if `siblings` has fewer than 64 entries, the missing entries at positions `len(siblings)` through `63` are filled with `emptyHash[len(siblings)]`, `emptyHash[len(siblings)+1]`, etc. + +For example, if a leaf is at index 5 in a tree with 100 occupied leaves, siblings at levels ~7 and above will all be empty subtree hashes, so the array will contain only ~7 entries instead of 64. 
+ +## Verification + +```coq +deriveFlatKey (address, storageKey) → H256 := + blake2s(leftPad32(address) || storageKey) + +hashLeaf (leafKey, value, nextIndex) → H256 := + blake2s(leafKey || value || nextIndex.to_le_bytes(8)) + +emptyHash (0) → H256 := blake2s(0x00{72}) +emptyHash (i) → H256 := blake2s(emptyHash(i-1) || emptyHash(i-1)) + +padSiblings (siblings) → H256[64] := + siblings ++ [emptyHash(i) for i in len(siblings)..63] + +walkMerklePath (leafHash, index, siblings) → H256 := + fullPath ← padSiblings(siblings) + current ← leafHash + idx ← index + for sibling in fullPath: + current ← if even(idx) then blake2s(current || sibling) + else blake2s(sibling || current) + idx ← idx / 2 + assert idx = 0 + current + +verifyExistingProof (address, storageProof) → (H256, H256) := + let flatKey = deriveFlatKey(address, storageProof.key) in + let p = storageProof.proof in + let stateRoot = walkMerklePath(hashLeaf(flatKey, p.value, p.nextIndex), + p.index, p.siblings) in + (stateRoot, p.value) + +verifyNonExistingProof (address, storageProof) → (H256, H256) := + let flatKey = deriveFlatKey(address, storageProof.key) in + let left = storageProof.proof.leftNeighbor in + let right = storageProof.proof.rightNeighbor in + let leftRoot = walkMerklePath(hashLeaf(left.leafKey, left.value, left.nextIndex), + left.index, left.siblings) in + let rightRoot = walkMerklePath(hashLeaf(right.leafKey, right.value, right.nextIndex), + right.index, right.siblings) in + assert leftRoot = rightRoot + assert left.leafKey < flatKey < right.leafKey + assert left.nextIndex = right.index + (leftRoot, 0x00{32}) + +verifyStateCommitment (stateRoot, preimage, expectedBatchHash) := + let stateCommitment = blake2s( + stateRoot + || preimage.nextFreeSlot.to_be_bytes(8) + || preimage.blockNumber.to_be_bytes(8) + || preimage.last256BlockHashesBlake + || preimage.lastBlockTimestamp.to_be_bytes(8) + ) in + assert stateCommitment = expectedBatchHash +``` + +### Full verification + +```coq +verify (response, 
batchHash) := + forall storageProof in response.storageProofs: + let (stateRoot, value) = + match storageProof.proof.type with + | "existing" => verifyExistingProof(response.address, storageProof) + | "nonExisting" => verifyNonExistingProof(response.address, storageProof) + in + verifyStateCommitment(stateRoot, response.stateCommitmentPreimage, batchHash) +``` + +Where `batchHash` is `StoredBatchInfo.batchHash` from L1 for the corresponding batch. + diff --git a/zk_ee/src/common_structs/chain_state_commitment.rs b/zk_ee/src/common_structs/chain_state_commitment.rs new file mode 100644 index 000000000..edf32cc25 --- /dev/null +++ b/zk_ee/src/common_structs/chain_state_commitment.rs @@ -0,0 +1,40 @@ +use crypto::MiniDigest; + +use crate::utils::Bytes32; + +/// +/// Commitment to state that we need to keep between blocks execution: +/// - state commitment(`state_root` and `next_free_slot`) +/// - block number +/// - last 256 block hashes, previous can be "unrolled" from the last, but we commit to 256 for optimization. +/// - last block timestamp, to ensure that block timestamps are not decreasing. +/// +/// This commitment(hash of its fields) will be saved on the settlement layer. +/// With proofs, we'll ensure that the values used during block execution correspond to this commitment. +/// +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ChainStateCommitment { + pub state_root: Bytes32, + pub next_free_slot: u64, + pub block_number: u64, + pub last_256_block_hashes_blake: Bytes32, + pub last_block_timestamp: u64, +} + +impl ChainStateCommitment { + /// + /// Calculate blake2s hash of chain state commitment. + /// + /// We are using proving friendly blake2s because this commitment will be generated and opened during proving, + /// but we don't need to open it on the settlement layer. 
+ /// + pub fn hash(&self) -> [u8; 32] { + let mut hasher = crypto::blake2s::Blake2s256::new(); + hasher.update(self.state_root.as_u8_ref()); + hasher.update(self.next_free_slot.to_be_bytes()); + hasher.update(self.block_number.to_be_bytes()); + hasher.update(self.last_256_block_hashes_blake.as_u8_ref()); + hasher.update(self.last_block_timestamp.to_be_bytes()); + hasher.finalize() + } +} diff --git a/zk_ee/src/common_structs/mod.rs b/zk_ee/src/common_structs/mod.rs index c489c049c..cc0e62f2d 100644 --- a/zk_ee/src/common_structs/mod.rs +++ b/zk_ee/src/common_structs/mod.rs @@ -1,5 +1,6 @@ pub mod cache_record; pub mod callee_account_properties; +pub mod chain_state_commitment; pub mod da_commitment_scheme; pub mod events_storage; pub mod history_counter; @@ -18,6 +19,7 @@ pub mod warm_storage_key; pub mod warm_storage_value; pub use self::callee_account_properties::*; +pub use self::chain_state_commitment::*; pub use self::da_commitment_scheme::*; pub use self::events_storage::*; pub use self::logs_storage::*; diff --git a/zks_get_proof_verifier/Cargo.toml b/zks_get_proof_verifier/Cargo.toml new file mode 100644 index 000000000..883183a07 --- /dev/null +++ b/zks_get_proof_verifier/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "zks_get_proof_verifier" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +ruint = { workspace = true, default-features = false } +serde = { workspace = true, default-features = false, features = ["derive", "alloc"], optional = true } +const-hex = { version = "1", default-features = false, features = ["alloc"], optional = true } + +[features] +default = [] +serde = ["dep:serde", "dep:const-hex", "ruint/serde"] diff --git a/zks_get_proof_verifier/src/lib.rs b/zks_get_proof_verifier/src/lib.rs new file mode 100644 index 000000000..e3b5651d0 --- /dev/null +++ 
b/zks_get_proof_verifier/src/lib.rs @@ -0,0 +1,507 @@ +#![no_std] + +extern crate alloc; + +use alloc::vec::Vec; + +use ruint::aliases::B160; + +pub const ZERO_32_BYTES: [u8; 32] = [0u8; 32]; +pub const MAX_32_BYTES: [u8; 32] = [0xffu8; 32]; + +/// Preimage data required to recompute the L1 batch commitment. +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct StateCommitmentPreimage { + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + pub next_free_slot: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + pub block_number: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::bytes32"))] + pub last256_block_hashes_blake: [u8; 32], + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + pub last_block_timestamp: u64, +} + +/// Response envelope for `zks_getProof`. +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[derive(Clone, PartialEq, Eq)] +pub struct ZksGetProofResponse { + #[cfg_attr(feature = "serde", serde(with = "serde_hex::b160"))] + pub address: B160, + pub state_commitment_preimage: StateCommitmentPreimage, + pub storage_proofs: Vec<StorageProof>, +} + +/// A proof for a single storage key. +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct StorageProof { + #[cfg_attr(feature = "serde", serde(with = "serde_hex::bytes32"))] + pub key: [u8; 32], + pub proof: StorageProofType, +} + +/// A leaf and its Merkle path, used to prove non-existence. 
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LeafWithProof { + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + pub index: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::bytes32"))] + pub leaf_key: [u8; 32], + #[cfg_attr(feature = "serde", serde(with = "serde_hex::bytes32"))] + pub value: [u8; 32], + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + pub next_index: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::vec_bytes32"))] + pub siblings: Vec<[u8; 32]>, +} + +/// Storage proof variants following the `zks_getProof` JSON schema. +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "serde", + serde(tag = "type", rename_all_fields = "camelCase") +)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum StorageProofType { + Existing { + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + index: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::bytes32"))] + value: [u8; 32], + #[cfg_attr(feature = "serde", serde(rename = "nextIndex"))] + #[cfg_attr(feature = "serde", serde(with = "serde_hex::u64"))] + next_index: u64, + #[cfg_attr(feature = "serde", serde(with = "serde_hex::vec_bytes32"))] + siblings: Vec<[u8; 32]>, + }, + NonExisting { + #[cfg_attr(feature = "serde", serde(rename = "leftNeighbor"))] + left_neighbor: LeafWithProof, + #[cfg_attr(feature = "serde", serde(rename = "rightNeighbor"))] + right_neighbor: LeafWithProof, + }, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ZksGetProofVerificationError { + SiblingsTooLong { len: usize }, + NonExistingRootMismatch, + NeighborOrderInvalid, + NeighborLinkInvalid, + StateCommitmentMismatch, +} + +/// Minimal digest interface used by verifier internals. 
+pub trait ZksGetProofHasher { + fn update(&mut self, input: impl AsRef<[u8]>); + fn finalize_reset(&mut self) -> [u8; 32]; +} + +pub use verifier::{compute_state_commitment, verify_response}; + +impl ZksGetProofResponse { + pub fn verify_with( + &self, + expected_batch_hash: &[u8; 32], + hasher: &mut H, + ) -> Result, ZksGetProofVerificationError> { + verifier::verify_response::(self, expected_batch_hash, hasher) + } +} + +/// Verifier utilities and error types. +pub mod verifier { + use alloc::vec::Vec; + + use ruint::aliases::B160; + + use super::{ + StateCommitmentPreimage, StorageProof, StorageProofType, ZksGetProofHasher, + ZksGetProofResponse, ZksGetProofVerificationError, ZERO_32_BYTES, + }; + + #[derive(Clone, Copy, Debug)] + struct FlatStorageLeaf { + key: [u8; 32], + value: [u8; 32], + next_index: u64, + } + + fn derive_flat_storage_key( + address: &B160, + key: &[u8; 32], + hasher: &mut H, + ) -> [u8; 32] { + hasher.update([0u8; 12]); + hasher.update(address.to_be_bytes::<{ B160::BYTES }>()); + hasher.update(key); + hasher.finalize_reset() + } + + fn hash_leaf(hasher: &mut H, leaf: &FlatStorageLeaf) -> [u8; 32] { + hasher.update(leaf.key); + hasher.update(leaf.value); + hasher.update(leaf.next_index.to_le_bytes()); + hasher.finalize_reset() + } + + fn hash_node( + hasher: &mut H, + left_node: &[u8; 32], + right_node: &[u8; 32], + ) -> [u8; 32] { + hasher.update(left_node); + hasher.update(right_node); + hasher.finalize_reset() + } + + /// Computes the L1 batch commitment from a state root and its preimage. 
+ /// Should replicate the logic in zk_ee/src/common_structs/chain_state_commitment.rs + pub fn compute_state_commitment( + hasher: &mut H, + state_root: &[u8; 32], + preimage: &StateCommitmentPreimage, + ) -> [u8; 32] { + hasher.update(state_root); + hasher.update(preimage.next_free_slot.to_be_bytes()); + hasher.update(preimage.block_number.to_be_bytes()); + hasher.update(preimage.last256_block_hashes_blake); + hasher.update(preimage.last_block_timestamp.to_be_bytes()); + hasher.finalize_reset() + } + + /// Output of verifying a single proof. + /// Note that we include the computed state root, to avoid recomputing + /// the state commitment for every proof. + struct SingleVerificationResult { + computed_root: [u8; 32], + value: [u8; 32], + } + + /// Verify a single proof, returning the leaf value and recomputed + /// state root. + /// Note: caller is expected to check that the recomputed root is + /// consistent with the expected one. + fn verify_single_proof( + address: &B160, + proof: &StorageProof, + empty_hashes: &[[u8; 32]; N], + hasher: &mut H, + ) -> Result { + let flat_key = derive_flat_storage_key(address, &proof.key, hasher); + let (state_root, value) = match &proof.proof { + StorageProofType::Existing { + index, + value, + next_index, + siblings, + } => { + let leaf = FlatStorageLeaf { + key: flat_key, + value: *value, + next_index: *next_index, + }; + let root = compute_root_from_siblings::( + hasher, + *index, + &leaf, + siblings, + empty_hashes, + )?; + (root, *value) + } + StorageProofType::NonExisting { + left_neighbor, + right_neighbor, + } => { + if !(left_neighbor.leaf_key < flat_key && flat_key < right_neighbor.leaf_key) { + return Err(ZksGetProofVerificationError::NeighborOrderInvalid); + } + if left_neighbor.next_index != right_neighbor.index { + return Err(ZksGetProofVerificationError::NeighborLinkInvalid); + } + let left_leaf = FlatStorageLeaf { + key: left_neighbor.leaf_key, + value: left_neighbor.value, + next_index: 
left_neighbor.next_index, + }; + let right_leaf = FlatStorageLeaf { + key: right_neighbor.leaf_key, + value: right_neighbor.value, + next_index: right_neighbor.next_index, + }; + let left_root = compute_root_from_siblings::( + hasher, + left_neighbor.index, + &left_leaf, + &left_neighbor.siblings, + empty_hashes, + )?; + let right_root = compute_root_from_siblings::( + hasher, + right_neighbor.index, + &right_leaf, + &right_neighbor.siblings, + empty_hashes, + )?; + if left_root != right_root { + return Err(ZksGetProofVerificationError::NonExistingRootMismatch); + } + (left_root, ZERO_32_BYTES) + } + }; + + Ok(SingleVerificationResult { + computed_root: state_root, + value, + }) + } + + /// Verifies all storage proofs against the expected batch hash. + pub fn verify_response( + response: &ZksGetProofResponse, + expected_batch_hash: &[u8; 32], + hasher: &mut H, + ) -> Result, ZksGetProofVerificationError> { + // Handle case for 0 proofs: + if response.storage_proofs.is_empty() { + return Ok(alloc::vec![]); + } + + let empty_hashes = compute_empty_hashes::(hasher); + let mut values = Vec::with_capacity(response.storage_proofs.len()); + + // Handle first proof (must exist due to previous check) + let SingleVerificationResult { + computed_root: first_proof_computed_root, + value, + } = verify_single_proof::( + &response.address, + &response.storage_proofs[0], + &empty_hashes, + hasher, + )?; + + // For the first proof, we recompute state commitment and check against expected batch hash + let commitment = compute_state_commitment( + hasher, + &first_proof_computed_root, + &response.state_commitment_preimage, + ); + if &commitment != expected_batch_hash { + return Err(ZksGetProofVerificationError::StateCommitmentMismatch); + } + values.push(value); + + // Now, verify all remaining proofs by checking against the + // root computed for the first one + for proof in response.storage_proofs.iter().skip(1) { + let SingleVerificationResult { + computed_root, + value, + } = 
verify_single_proof::(&response.address, proof, &empty_hashes, hasher)?; + if computed_root != first_proof_computed_root { + return Err(ZksGetProofVerificationError::StateCommitmentMismatch); + } + values.push(value); + } + + Ok(values) + } + + fn compute_root_from_siblings( + hasher: &mut H, + index: u64, + leaf: &FlatStorageLeaf, + siblings: &[[u8; 32]], + empty_hashes: &[[u8; 32]; N], + ) -> Result<[u8; 32], ZksGetProofVerificationError> { + if siblings.len() > N { + return Err(ZksGetProofVerificationError::SiblingsTooLong { + len: siblings.len(), + }); + } + + let mut path = [ZERO_32_BYTES; N]; + path[..siblings.len()].copy_from_slice(siblings); + path[siblings.len()..N].copy_from_slice(&empty_hashes[siblings.len()..N]); + + Ok(recompute_root_from_leaf_and_path( + hasher, index, leaf, &path, + )) + } + + fn recompute_root_from_leaf_and_path( + hasher: &mut H, + index: u64, + leaf: &FlatStorageLeaf, + path: &[[u8; 32]; N], + ) -> [u8; 32] { + let leaf_hash = hash_leaf(hasher, leaf); + + let mut current = leaf_hash; + let mut index = index; + for path in path.iter() { + let path: &[u8; 32] = path; + let (left, right) = if index & 1 == 0 { + // current is left + (¤t, path) + } else { + (path, ¤t) + }; + let next = hash_node(hasher, left, right); + current = next; + index >>= 1; + } + assert!(index == 0); + + current + } + + fn compute_empty_hashes(hasher: &mut H) -> [[u8; 32]; N] { + let mut result = [ZERO_32_BYTES; N]; + let empty_leaf = FlatStorageLeaf { + key: ZERO_32_BYTES, + value: ZERO_32_BYTES, + next_index: 0, + }; + let empty_leaf_hash = hash_leaf(hasher, &empty_leaf); + result[0] = empty_leaf_hash; + let mut previous = empty_leaf_hash; + for i in 0..(N - 1) { + let node_hash = hash_node(hasher, &previous, &previous); + result[i + 1] = node_hash; + previous = node_hash; + } + + result + } +} + +#[cfg(feature = "serde")] +mod serde_hex { + use alloc::string::String; + use alloc::vec::Vec; + + use ruint::aliases::B160; + use serde::{de::Error as _, 
Deserialize, Deserializer, Serialize, Serializer};

    /// Encodes `bytes` as a lowercase `0x`-prefixed hex string.
    fn encode_hex(bytes: &[u8]) -> String {
        let mut out = String::from("0x");
        out.push_str(&const_hex::encode(bytes));
        out
    }

    /// Decodes a hex string (with or without `0x` prefix), checking that it
    /// is exactly `expected_len` bytes long.
    fn decode_hex_str<'de, D>(deserializer: D, expected_len: usize) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let raw = String::deserialize(deserializer)?;
        let s = raw.strip_prefix("0x").unwrap_or(&raw);
        let bytes = const_hex::decode(s).map_err(D::Error::custom)?;
        if bytes.len() != expected_len {
            return Err(D::Error::custom("invalid hex length"));
        }
        Ok(bytes)
    }

    /// `serde(with = ...)` adapter: `[u8; 32]` as a 0x-hex string.
    pub mod bytes32 {
        use super::*;

        pub fn serialize<S>(value: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            serializer.serialize_str(&encode_hex(value))
        }

        pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
        where
            D: Deserializer<'de>,
        {
            let bytes = decode_hex_str(deserializer, 32)?;
            let mut array = [0u8; 32];
            array.copy_from_slice(&bytes);
            Ok(array)
        }
    }

    /// `serde(with = ...)` adapter: `B160` (20-byte address) as a 0x-hex string.
    pub mod b160 {
        use super::*;

        pub fn serialize<S>(value: &B160, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            serializer.serialize_str(&encode_hex(&value.to_be_bytes::<{ B160::BYTES }>()))
        }

        pub fn deserialize<'de, D>(deserializer: D) -> Result<B160, D::Error>
        where
            D: Deserializer<'de>,
        {
            let bytes = decode_hex_str(deserializer, 20)?;
            let mut array = [0u8; 20];
            array.copy_from_slice(&bytes);
            Ok(B160::from_be_bytes(array))
        }
    }

    /// `serde(with = ...)` adapter: `u64` as a 0x-prefixed hex quantity.
    pub mod u64 {
        use super::*;

        pub fn serialize<S>(value: &u64, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            serializer.serialize_str(&alloc::format!("0x{value:x}"))
        }

        pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
        where
            D: Deserializer<'de>,
        {
            let raw = String::deserialize(deserializer)?;
            let s = raw.strip_prefix("0x").unwrap_or(&raw);
            u64::from_str_radix(s, 16).map_err(D::Error::custom)
        }
    }

    /// `serde(with = ...)` adapter: `Vec<[u8; 32]>` as a list of 0x-hex strings.
    pub mod vec_bytes32 {
        use super::*;

        pub fn serialize<S>(value: &[[u8; 32]], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            let encoded: Vec<String> = value.iter().map(|item| encode_hex(item)).collect();
            encoded.serialize(serializer)
        }

        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<[u8; 32]>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let raw: Vec<String> = Vec::deserialize(deserializer)?;
            let mut out = Vec::with_capacity(raw.len());
            for item in raw {
                let s = item.strip_prefix("0x").unwrap_or(&item);
                let bytes = const_hex::decode(s).map_err(D::Error::custom)?;
                if bytes.len() != 32 {
                    return Err(D::Error::custom("invalid hex length"));
                }
                let mut array = [0u8; 32];
                array.copy_from_slice(&bytes);
                out.push(array);
            }
            Ok(out)
        }
    }
}