diff --git a/host/config/config.devnet.json b/host/config/config.devnet.json
index ab155fa36..bf5b34f1e 100644
--- a/host/config/config.devnet.json
+++ b/host/config/config.devnet.json
@@ -30,11 +30,10 @@
         "execution_po2": 20
     },
     "sp1": {
-        "recursion": "core",
        "prover": "network",
         "verify": false
     },
     "native": {
         "json_guest_input": null
     }
-}
\ No newline at end of file
+}
diff --git a/host/config/config.json b/host/config/config.json
index 1bbaee34f..fc92346cd 100644
--- a/host/config/config.json
+++ b/host/config/config.json
@@ -20,7 +20,6 @@
         "execution_po2": 20
     },
     "sp1": {
-        "recursion": "core",
         "prover": "network",
         "verify": false
     },
diff --git a/host/config/config.sgx.json b/host/config/config.sgx.json
index 4f58a0533..12cc263ba 100644
--- a/host/config/config.sgx.json
+++ b/host/config/config.sgx.json
@@ -30,11 +30,10 @@
         "verify": true
     },
     "sp1": {
-        "recursion": "core",
         "prover": "network",
         "verify": true
     },
     "native": {
         "json_guest_input": null
     }
-}
\ No newline at end of file
+}
diff --git a/host/config/config.taiko_hekla.json b/host/config/config.taiko_hekla.json
index fb08954ba..51d2c4d67 100644
--- a/host/config/config.taiko_hekla.json
+++ b/host/config/config.taiko_hekla.json
@@ -17,7 +17,6 @@
         "execution_po2": 20
     },
     "sp1": {
-        "recursion": "core",
         "prover": "network",
         "verify": true
     },
@@ -33,4 +32,4 @@
     "native": {
         "json_guest_input": null
     }
-}
\ No newline at end of file
+}
diff --git a/host/config/config.taiko_mainnet.json b/host/config/config.taiko_mainnet.json
index 6dee594f6..c039c8b4e 100644
--- a/host/config/config.taiko_mainnet.json
+++ b/host/config/config.taiko_mainnet.json
@@ -17,7 +17,6 @@
         "execution_po2": 20
     },
     "sp1": {
-        "recursion": "core",
         "prover": "network",
         "verify": true
     },
@@ -33,4 +32,4 @@
     "native": {
         "json_guest_input": null
     }
-}
\ No newline at end of file
+}
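With `recursion` gone from every shipped config, the SP1 recursion mode is no longer user-configurable; it is pinned inside the sp1 driver per code path (see `provers/sp1/driver/src/lib.rs` below). Each `sp1` block now reduces to, for example in `config.devnet.json`:

```json
"sp1": {
    "prover": "network",
    "verify": false
}
```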
diff --git a/host/src/server/api/v2/mod.rs b/host/src/server/api/v2/mod.rs
index 1b4208a51..0913b5ad5 100644
--- a/host/src/server/api/v2/mod.rs
+++ b/host/src/server/api/v2/mod.rs
@@ -69,6 +69,8 @@ pub enum Status {
     Ok {
         #[serde(with = "raiko_lib::proof_type::lowercase")]
         proof_type: ProofType,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        batch_id: Option<u64>,
         data: ProofResponse,
     },
     Error {
diff --git a/host/src/server/api/v2/proof/mod.rs b/host/src/server/api/v2/proof/mod.rs
index 114f02af9..38c5ff113 100644
--- a/host/src/server/api/v2/proof/mod.rs
+++ b/host/src/server/api/v2/proof/mod.rs
@@ -9,7 +9,7 @@ use raiko_tasks::TaskStatus;
 use serde_json::Value;
 use utoipa::OpenApi;
 
-use crate::server::utils::{draw_for_zk_any_request, fulfill_sp1_params, is_zk_any_request};
+use crate::server::utils::{draw_for_zk_any_request, is_zk_any_request};
 use crate::{
     interfaces::HostResult,
     metrics::{inc_current_req, inc_guest_req_count, inc_host_req_count},
@@ -39,16 +39,9 @@ pub mod report;
 /// - sgx - uses the sgx environment to construct a block and produce proof of execution
 /// - sp1 - uses the sp1 prover
 /// - risc0 - uses the risc0 prover
-async fn proof_handler(
-    State(actor): State<Actor>,
-    Json(mut req): Json<Value>,
-) -> HostResult<Status> {
+async fn proof_handler(State(actor): State<Actor>, Json(req): Json<Value>) -> HostResult<Status> {
     inc_current_req();
 
-    if is_zk_any_request(&req) {
-        fulfill_sp1_params(&mut req);
-    }
-
     // Override the existing proof request config from the config file and command line
     // options with the request from the client.
     let mut config = actor.default_request_config().clone();
@@ -61,6 +54,7 @@ async fn proof_handler(
         None => {
             return Ok(Status::Ok {
                 proof_type: ProofType::Native,
+                batch_id: None,
                 data: ProofResponse::Status {
                     status: TaskStatus::ZKAnyNotDrawn,
                 },
@@ -126,10 +120,10 @@ async fn proof_handler(
                 .into();
             let result = crate::server::prove(&actor, request_key, request_entity).await;
-            Ok(to_v2_status(proof_type, result))
+            Ok(to_v2_status(proof_type, None, result))
         }
-        Ok(_) => Ok(to_v2_status(proof_type, result)),
-        Err(_) => Ok(to_v2_status(proof_type, result)),
+        Ok(_) => Ok(to_v2_status(proof_type, None, result)),
+        Err(_) => Ok(to_v2_status(proof_type, None, result)),
     }
 }
diff --git a/host/src/server/api/v3/proof/aggregate/mod.rs b/host/src/server/api/v3/proof/aggregate/mod.rs
index 3f6745f9e..1f8749c1e 100644
--- a/host/src/server/api/v3/proof/aggregate/mod.rs
+++ b/host/src/server/api/v3/proof/aggregate/mod.rs
@@ -67,7 +67,7 @@ async fn aggregation_handler(
         .into();
     let result = crate::server::prove(&actor, agg_request_key, agg_request_entity).await;
 
-    Ok(to_v3_status(proof_type, result))
+    Ok(to_v3_status(proof_type, None, result))
 }
 
 #[derive(OpenApi)]
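Because `batch_id` is annotated with `skip_serializing_if = "Option::is_none"`, the existing v2/v3 block and aggregation responses (which pass `None`) keep their wire shape, while batch responses gain one top-level field. A sketch of a batch response, assuming the enum's existing lowercase `status` tagging; the concrete values are illustrative:

```json
{
  "status": "ok",
  "proof_type": "risc0",
  "batch_id": 1234,
  "data": {
    "status": "zk_any_not_drawn"
  }
}
```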
diff --git a/host/src/server/api/v3/proof/batch.rs b/host/src/server/api/v3/proof/batch.rs
index f522ffb45..fef9264f4 100644
--- a/host/src/server/api/v3/proof/batch.rs
+++ b/host/src/server/api/v3/proof/batch.rs
@@ -4,12 +4,12 @@ use crate::{
         api::v3::{ProofResponse, Status},
         handler::prove_many,
         prove_aggregation,
-        utils::{is_zk_any_request, to_v3_status},
+        utils::{draw_for_zk_any_batch_request, is_zk_any_request, to_v3_status},
     },
 };
 use axum::{extract::State, routing::post, Json, Router};
 use raiko_core::{
-    interfaces::{BatchMetadata, BatchProofRequest, BatchProofRequestOpt},
+    interfaces::{BatchMetadata, BatchProofRequest, BatchProofRequestOpt, RaikoError},
     merge,
 };
 use raiko_lib::{proof_type::ProofType, prover::Proof};
@@ -41,20 +41,46 @@ async fn batch_handler(
     State(actor): State<Actor>,
     Json(batch_request_opt): Json<Value>,
 ) -> HostResult<Status> {
-    if is_zk_any_request(&batch_request_opt) {
-        return Ok(Status::Ok {
-            proof_type: ProofType::Native,
-            data: ProofResponse::Status {
-                status: TaskStatus::ZKAnyNotDrawn,
-            },
-        });
-    }
+    tracing::debug!(
+        "Received batch request: {}",
+        serde_json::to_string(&batch_request_opt)?
+    );
 
     let batch_request = {
         // Override the existing proof request config from the config file and command line
         // options with the request from the client, and convert to a BatchProofRequest.
         let mut opts = serde_json::to_value(actor.default_request_config())?;
         merge(&mut opts, &batch_request_opt);
+
+        let first_batch_id = {
+            let batches = opts["batches"]
+                .as_array()
+                .ok_or(RaikoError::InvalidRequestConfig(
+                    "Missing batches".to_string(),
+                ))?;
+            let first_batch = batches.first().ok_or(RaikoError::InvalidRequestConfig(
+                "batches is empty".to_string(),
+            ))?;
+            first_batch["batch_id"]
+                .as_u64()
+                .ok_or(RaikoError::InvalidRequestConfig(
+                    "Missing batch_id".to_string(),
+                ))?
+        };
+
+        // For a zk_any request, draw the zk proof type based on the block hash.
+        if is_zk_any_request(&opts) {
+            match draw_for_zk_any_batch_request(&actor, &opts).await? {
+                Some(proof_type) => opts["proof_type"] = serde_json::to_value(proof_type).unwrap(),
+                None => {
+                    return Ok(Status::Ok {
+                        proof_type: ProofType::Native,
+                        batch_id: Some(first_batch_id),
+                        data: ProofResponse::Status {
+                            status: TaskStatus::ZKAnyNotDrawn,
+                        },
+                    });
+                }
+            }
+        }
+
         let batch_request_opt: BatchProofRequestOpt = serde_json::from_value(opts)?;
         let batch_request: BatchProofRequest = batch_request_opt.try_into()?;
@@ -188,7 +214,12 @@ async fn batch_handler(
             })
         }
     };
-    Ok(to_v3_status(batch_request.proof_type, result))
+    tracing::debug!("Batch proof result: {}", serde_json::to_string(&result)?);
+    Ok(to_v3_status(
+        batch_request.proof_type,
+        Some(batch_request.batches.first().unwrap().batch_id),
+        result,
+    ))
 }
 
 #[derive(OpenApi)]
diff --git a/host/src/server/api/v3/proof/mod.rs b/host/src/server/api/v3/proof/mod.rs
index f1d905a0d..1c1391b54 100644
--- a/host/src/server/api/v3/proof/mod.rs
+++ b/host/src/server/api/v3/proof/mod.rs
@@ -115,7 +115,7 @@ async fn proof_handler(
         sub_request_entities,
     )
     .await;
-    Ok(to_v3_status(proof_type, result))
+    Ok(to_v3_status(proof_type, None, result))
 }
 
 #[derive(OpenApi)]
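For illustration, a trimmed zk_any batch request that exercises the new draw path. It carries just the fields the handler above and `draw_for_zk_any_batch_request` (in `host/src/server/utils.rs` below) actually read; all values are made up and the remaining `BatchProofRequestOpt` fields are elided:

```json
{
  "proof_type": "zk_any",
  "l1_network": "ethereum",
  "batches": [
    {
      "batch_id": 1234,
      "l1_inclusion_block_number": 21000000
    }
  ]
}
```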
empty".to_string(), + ))?; + let l1_inclusion_block_number = first_batch["l1_inclusion_block_number"].as_u64().ok_or( + RaikoError::InvalidRequestConfig("Missing l1_inclusion_block_number".to_string()), + )?; + let (_, blockhash) = + get_task_data(&l1_network, l1_inclusion_block_number, actor.chain_specs()).await?; + Ok(actor.draw(&blockhash)) } diff --git a/provers/risc0/builder/src/main.rs b/provers/risc0/builder/src/main.rs index f1eb940f2..03ef4a8bd 100644 --- a/provers/risc0/builder/src/main.rs +++ b/provers/risc0/builder/src/main.rs @@ -6,7 +6,7 @@ use std::path::PathBuf; fn main() { let pipeline = Risc0Pipeline::new("provers/risc0/guest", "release"); pipeline.bins( - &["risc0-guest", "risc0-aggregation"], + &["risc0-guest", "risc0-aggregation", "risc0-batch"], "provers/risc0/driver/src/methods", ); #[cfg(feature = "test")] diff --git a/provers/risc0/driver/src/bonsai.rs b/provers/risc0/driver/src/bonsai.rs index 11c85119b..544077711 100644 --- a/provers/risc0/driver/src/bonsai.rs +++ b/provers/risc0/driver/src/bonsai.rs @@ -1,5 +1,4 @@ use crate::{ - methods::risc0_guest::RISC0_GUEST_ID, snarks::{stark2snark, verify_groth16_from_snark_receipt}, Risc0Response, }; @@ -313,8 +312,10 @@ pub async fn bonsai_stark_to_snark( stark_uuid: String, stark_receipt: Receipt, input: B256, + elf: &[u8], ) -> ProverResult { - let image_id = Digest::from(RISC0_GUEST_ID); + let image_id = risc0_zkvm::compute_image_id(elf) + .map_err(|e| ProverError::GuestError(format!("Failed to compute image id: {e:?}")))?; let (snark_uuid, snark_receipt) = stark2snark( image_id, stark_uuid.clone(), diff --git a/provers/risc0/driver/src/lib.rs b/provers/risc0/driver/src/lib.rs index 1dff55865..3dc735e69 100644 --- a/provers/risc0/driver/src/lib.rs +++ b/provers/risc0/driver/src/lib.rs @@ -3,8 +3,8 @@ #[cfg(feature = "bonsai-auto-scaling")] use crate::bonsai::auto_scaling::shutdown_bonsai; use crate::{ - methods::risc0_aggregation::RISC0_AGGREGATION_ELF, - methods::risc0_guest::{RISC0_GUEST_ELF, RISC0_GUEST_ID}, + methods::risc0_aggregation::RISC0_AGGREGATION_ELF, methods::risc0_batch::RISC0_BATCH_ELF, + methods::risc0_guest::RISC0_GUEST_ELF, }; use alloy_primitives::{hex::ToHexExt, B256}; use bonsai::{cancel_proof, maybe_prove}; @@ -18,8 +18,10 @@ use raiko_lib::{ prover::{IdStore, IdWrite, Proof, ProofKey, Prover, ProverConfig, ProverError, ProverResult}, }; use risc0_zkvm::{ - compute_image_id, default_prover, serde::to_vec, sha::Digestible, ExecutorEnv, ProverOpts, - Receipt, + compute_image_id, default_prover, + serde::to_vec, + sha::{Digest, Digestible}, + ExecutorEnv, ProverOpts, Receipt, }; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -93,7 +95,7 @@ impl Prover for Risc0Prover { .await?; let proof_gen_result = if config.snark && config.bonsai { - bonsai::bonsai_stark_to_snark(uuid, receipt, output.hash) + bonsai::bonsai_stark_to_snark(uuid, receipt, output.hash, RISC0_GUEST_ELF) .await .map(|r0_response| r0_response.into()) .map_err(|e| ProverError::GuestError(e.to_string())) @@ -149,11 +151,16 @@ impl Prover for Risc0Prover { .iter() .map(|proof| proof.input.unwrap()) .collect::>(); + + let input_proof_hex_str = input.proofs[0].proof.as_ref().unwrap(); + let input_proof_bytes = hex::decode(&input_proof_hex_str[2..]).unwrap(); + let input_image_id_bytes: [u8; 32] = input_proof_bytes[32..64].try_into().unwrap(); + let input_proof_image_id = Digest::from(input_image_id_bytes); let input = ZkAggregationGuestInput { - image_id: RISC0_GUEST_ID, + image_id: 
diff --git a/provers/risc0/driver/src/lib.rs b/provers/risc0/driver/src/lib.rs
index 1dff55865..3dc735e69 100644
--- a/provers/risc0/driver/src/lib.rs
+++ b/provers/risc0/driver/src/lib.rs
@@ -3,8 +3,8 @@
 #[cfg(feature = "bonsai-auto-scaling")]
 use crate::bonsai::auto_scaling::shutdown_bonsai;
 use crate::{
-    methods::risc0_aggregation::RISC0_AGGREGATION_ELF,
-    methods::risc0_guest::{RISC0_GUEST_ELF, RISC0_GUEST_ID},
+    methods::risc0_aggregation::RISC0_AGGREGATION_ELF, methods::risc0_batch::RISC0_BATCH_ELF,
+    methods::risc0_guest::RISC0_GUEST_ELF,
 };
 use alloy_primitives::{hex::ToHexExt, B256};
 use bonsai::{cancel_proof, maybe_prove};
@@ -18,8 +18,10 @@ use raiko_lib::{
     prover::{IdStore, IdWrite, Proof, ProofKey, Prover, ProverConfig, ProverError, ProverResult},
 };
 use risc0_zkvm::{
-    compute_image_id, default_prover, serde::to_vec, sha::Digestible, ExecutorEnv, ProverOpts,
-    Receipt,
+    compute_image_id, default_prover,
+    serde::to_vec,
+    sha::{Digest, Digestible},
+    ExecutorEnv, ProverOpts, Receipt,
 };
 use serde::{Deserialize, Serialize};
 use serde_with::serde_as;
@@ -93,7 +95,7 @@ impl Prover for Risc0Prover {
             .await?;
 
         let proof_gen_result = if config.snark && config.bonsai {
-            bonsai::bonsai_stark_to_snark(uuid, receipt, output.hash)
+            bonsai::bonsai_stark_to_snark(uuid, receipt, output.hash, RISC0_GUEST_ELF)
                 .await
                 .map(|r0_response| r0_response.into())
                 .map_err(|e| ProverError::GuestError(e.to_string()))
@@ -149,11 +151,16 @@ impl Prover for Risc0Prover {
             .iter()
             .map(|proof| proof.input.unwrap())
             .collect::<Vec<_>>();
+
+        let input_proof_hex_str = input.proofs[0].proof.as_ref().unwrap();
+        let input_proof_bytes = hex::decode(&input_proof_hex_str[2..]).unwrap();
+        let input_image_id_bytes: [u8; 32] = input_proof_bytes[32..64].try_into().unwrap();
+        let input_proof_image_id = Digest::from(input_image_id_bytes);
         let input = ZkAggregationGuestInput {
-            image_id: RISC0_GUEST_ID,
+            image_id: input_proof_image_id.as_words().try_into().unwrap(),
             block_inputs,
         };
-        info!("Start aggregate proofs");
+
         // add_assumption makes the receipt to be verified available to the prover.
         let env = {
             let mut env = ExecutorEnv::builder();
@@ -173,10 +180,9 @@ impl Prover for Risc0Prover {
             "Generate aggregation receipt journal: {:?}",
             alloy_primitives::hex::encode_prefixed(receipt.journal.bytes.clone())
         );
-        let block_proof_image_id = compute_image_id(RISC0_GUEST_ELF).unwrap();
         let aggregation_image_id = compute_image_id(RISC0_AGGREGATION_ELF).unwrap();
         let proof_data = snarks::verify_aggregation_groth16_proof(
-            block_proof_image_id,
+            input_proof_image_id,
             aggregation_image_id,
             receipt.clone(),
         )
@@ -223,12 +229,60 @@ impl Prover for Risc0Prover {
 
     async fn batch_run(
         &self,
-        _input: GuestBatchInput,
-        _output: &GuestBatchOutput,
-        _config: &ProverConfig,
-        _store: Option<&mut dyn IdWrite>,
+        input: GuestBatchInput,
+        output: &GuestBatchOutput,
+        config: &ProverConfig,
+        id_store: Option<&mut dyn IdWrite>,
     ) -> ProverResult<Proof> {
-        unimplemented!();
+        let mut id_store = id_store;
+        let config = Risc0Param::deserialize(config.get("risc0").unwrap()).unwrap();
+        let proof_key = (
+            input.taiko.chain_spec.chain_id,
+            input.taiko.batch_id,
+            output.hash,
+            ProofType::Risc0 as u8,
+        );
+
+        let encoded_input = to_vec(&input).expect("Could not serialize proving input!");
+
+        let (uuid, receipt) = maybe_prove::<GuestBatchInput, B256>(
+            &config,
+            encoded_input,
+            RISC0_BATCH_ELF,
+            &output.hash,
+            (Vec::<Receipt>::new(), Vec::new()),
+            proof_key,
+            &mut id_store,
+        )
+        .await?;
+
+        let proof_gen_result = if config.snark && config.bonsai {
+            bonsai::bonsai_stark_to_snark(uuid, receipt, output.hash, RISC0_BATCH_ELF)
+                .await
+                .map(|r0_response| r0_response.into())
+                .map_err(|e| ProverError::GuestError(e.to_string()))
+        } else {
+            if !config.snark {
+                warn!("proof is not in snark mode, please check.");
+            }
+            Ok(Risc0Response {
+                proof: receipt.journal.encode_hex_with_prefix(),
+                receipt: serde_json::to_string(&receipt).unwrap(),
+                uuid,
+                input: output.hash,
+            }
+            .into())
+        };
+
+        #[cfg(feature = "bonsai-auto-scaling")]
+        if config.bonsai {
+            // shutdown bonsai
+            shutdown_bonsai()
+                .await
+                .map_err(|e| ProverError::GuestError(e.to_string()))?;
+        }
+
+        proof_gen_result
     }
 }
diff --git a/provers/risc0/driver/src/methods/mod.rs b/provers/risc0/driver/src/methods/mod.rs
index 19219d8af..3bcc159d0 100644
--- a/provers/risc0/driver/src/methods/mod.rs
+++ b/provers/risc0/driver/src/methods/mod.rs
@@ -1,4 +1,5 @@
 pub mod risc0_aggregation;
+pub mod risc0_batch;
 pub mod risc0_guest;
 
 // To build the following `$ cargo run --features test,bench --bin risc0-builder`
diff --git a/provers/risc0/driver/src/methods/risc0_aggregation.rs b/provers/risc0/driver/src/methods/risc0_aggregation.rs
index 642ac91a5..80bfb0a5f 100644
--- a/provers/risc0/driver/src/methods/risc0_aggregation.rs
+++ b/provers/risc0/driver/src/methods/risc0_aggregation.rs
@@ -1,5 +1,5 @@
 pub const RISC0_AGGREGATION_ELF: &[u8] =
     include_bytes!("../../../guest/target/riscv32im-risc0-zkvm-elf/release/risc0-aggregation");
 pub const RISC0_AGGREGATION_ID: [u32; 8] = [
-    757572567, 2963367168, 3257289195, 2520060355, 1598050287, 3638503613, 1461238162, 2044849682,
+    3693181676, 1423972539, 2575584598, 1469200887, 651718346, 3917396100, 60015622, 535591167,
 ];
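The aggregation path above now trusts the image id embedded in the first input proof instead of pinning `RISC0_GUEST_ID`, which is what lets one aggregator accept both block and batch proofs. A hedged, standalone sketch of that extraction (the 32..64 byte offset is an assumption carried over from the code above; `Option` replaces the `unwrap`s, and `hex` is the same crate the driver uses):

```rust
use risc0_zkvm::sha::Digest;

// Recover the guest image id embedded in a "0x..."-prefixed proof hex string,
// mirroring aggregate() above. The leading 32 bytes are not interpreted here.
fn image_id_from_proof_hex(proof_hex: &str) -> Option<Digest> {
    let bytes = hex::decode(proof_hex.strip_prefix("0x")?).ok()?;
    let words: [u8; 32] = bytes.get(32..64)?.try_into().ok()?;
    Some(Digest::from(words))
}
```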
diff --git a/provers/risc0/driver/src/methods/risc0_batch.rs b/provers/risc0/driver/src/methods/risc0_batch.rs
new file mode 100644
index 000000000..5afd890db
--- /dev/null
+++ b/provers/risc0/driver/src/methods/risc0_batch.rs
@@ -0,0 +1,5 @@
+pub const RISC0_BATCH_ELF: &[u8] =
+    include_bytes!("../../../guest/target/riscv32im-risc0-zkvm-elf/release/risc0-batch");
+pub const RISC0_BATCH_ID: [u32; 8] = [
+    1585630346, 345989788, 4151101683, 819276391, 4218347574, 861965425, 3025423219, 3488057193,
+];
diff --git a/provers/risc0/driver/src/methods/risc0_guest.rs b/provers/risc0/driver/src/methods/risc0_guest.rs
index 94f983180..34e577244 100644
--- a/provers/risc0/driver/src/methods/risc0_guest.rs
+++ b/provers/risc0/driver/src/methods/risc0_guest.rs
@@ -1,5 +1,5 @@
 pub const RISC0_GUEST_ELF: &[u8] =
     include_bytes!("../../../guest/target/riscv32im-risc0-zkvm-elf/release/risc0-guest");
 pub const RISC0_GUEST_ID: [u32; 8] = [
-    1689653193, 2796478021, 3874123379, 560216071, 3867155830, 2784172499, 3235388420, 507179944,
+    3813617258, 1499495912, 1722814953, 1786341610, 888187967, 1145233772, 2032049967, 243991141,
 ];
diff --git a/provers/risc0/guest/Cargo.toml b/provers/risc0/guest/Cargo.toml
index c78d247fd..c100da0cb 100644
--- a/provers/risc0/guest/Cargo.toml
+++ b/provers/risc0/guest/Cargo.toml
@@ -13,6 +13,10 @@ path = "src/zk_op.rs"
 name = "risc0-aggregation"
 path = "src/aggregation.rs"
 
+[[bin]]
+name = "risc0-batch"
+path = "src/batch.rs"
+
 [[bin]]
 name = "sha256"
 path = "src/benchmark/sha256.rs"
diff --git a/provers/risc0/guest/src/batch.rs b/provers/risc0/guest/src/batch.rs
new file mode 100644
index 000000000..509c240fa
--- /dev/null
+++ b/provers/risc0/guest/src/batch.rs
@@ -0,0 +1,39 @@
+#![no_main]
+harness::entrypoint!(main, tests, zk_op::tests);
+use raiko_lib::{
+    builder::calculate_batch_blocks_final_header, input::GuestBatchInput, proof_type::ProofType,
+    protocol_instance::ProtocolInstance,
+};
+use revm_precompile::zk_op::ZkOperation;
+use risc0_zkvm::guest::env;
+use zk_op::Risc0Operator;
+
+pub mod mem;
+
+pub use mem::*;
+
+fn main() {
+    let batch_input: GuestBatchInput = env::read();
+
+    revm_precompile::zk_op::ZKVM_OPERATOR.get_or_init(|| Box::new(Risc0Operator {}));
+    revm_precompile::zk_op::ZKVM_OPERATIONS
+        .set(Box::new(vec![ZkOperation::Sha256, ZkOperation::Secp256k1]))
+        .expect("Failed to set ZkvmOperations");
+
+    let final_blocks = calculate_batch_blocks_final_header(&batch_input);
+    let pi = ProtocolInstance::new_batch(&batch_input, final_blocks, ProofType::Risc0)
+        .unwrap()
+        .instance_hash();
+
+    env::commit(&pi);
+}
+
+harness::zk_suits!(
+    pub mod tests {
+        #[test]
+        pub fn test_build_from_mock_input() {
+            // Todo: impl mock input for static unit test
+            assert_eq!(1, 1);
+        }
+    }
+);
diff --git a/provers/sp1/builder/src/main.rs b/provers/sp1/builder/src/main.rs
index fe696594e..114c35222 100644
--- a/provers/sp1/builder/src/main.rs
+++ b/provers/sp1/builder/src/main.rs
@@ -5,7 +5,10 @@ use std::path::PathBuf;
 
 fn main() {
     let pipeline = Sp1Pipeline::new("provers/sp1/guest", "release");
-    pipeline.bins(&["sp1-guest", "sp1-aggregation"], "provers/sp1/guest/elf");
+    pipeline.bins(
+        &["sp1-guest", "sp1-aggregation", "sp1-batch"],
+        "provers/sp1/guest/elf",
+    );
     #[cfg(feature = "test")]
     pipeline.tests(&["sp1-guest"], "provers/sp1/guest/elf");
     #[cfg(feature = "bench")]
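One subtlety shared by the two new guests: the risc0 guest (above) commits the `B256` itself via `env::commit(&pi)`, while the sp1 guest (below) commits the raw 32-byte array `pi.0`. A hypothetical host-side decoding sketch matching those commits; both helper names are invented and error handling is illustrative:

```rust
use reth_primitives::B256;

// risc0: pi was written with env::commit(&pi), i.e. risc0 serde over B256.
fn decode_risc0_instance(journal: &risc0_zkvm::Journal) -> anyhow::Result<B256> {
    Ok(journal.decode::<B256>()?)
}

// sp1: pi.0 was written with io::commit, so read back the raw [u8; 32].
fn decode_sp1_instance(public_values: &mut sp1_sdk::SP1PublicValues) -> B256 {
    B256::from_slice(&public_values.read::<[u8; 32]>())
}
```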
diff --git a/provers/sp1/driver/src/lib.rs b/provers/sp1/driver/src/lib.rs
index 9f98a0226..bc02fc890 100644
--- a/provers/sp1/driver/src/lib.rs
+++ b/provers/sp1/driver/src/lib.rs
@@ -21,13 +21,14 @@ use sp1_sdk::{
 };
 use sp1_sdk::{HashableKey, ProverClient, SP1Stdin};
 use std::{borrow::BorrowMut, env, sync::Arc, time::Duration};
-use tracing::{debug, error, info};
+use tracing::{debug, info};
 
 mod proof_verify;
 use proof_verify::remote_contract_verify::verify_sol_by_contract_call;
 
 pub const ELF: &[u8] = include_bytes!("../../guest/elf/sp1-guest");
 pub const AGGREGATION_ELF: &[u8] = include_bytes!("../../guest/elf/sp1-aggregation");
+pub const BATCH_ELF: &[u8] = include_bytes!("../../guest/elf/sp1-batch");
 
 #[serde_as]
 #[derive(Clone, Debug, Serialize, Deserialize)]
@@ -110,6 +111,7 @@ struct Sp1ProverClient {
 //TODO: use prover object to save such local storage members.
 static BLOCK_PROOF_CLIENT: Lazy<DashMap<ProverMode, Sp1ProverClient>> = Lazy::new(DashMap::new);
 static AGGREGATION_CLIENT: Lazy<DashMap<ProverMode, Sp1ProverClient>> = Lazy::new(DashMap::new);
+static BATCH_PROOF_CLIENT: Lazy<DashMap<ProverMode, Sp1ProverClient>> = Lazy::new(DashMap::new);
 
 impl Prover for Sp1Prover {
     async fn run(
@@ -119,7 +121,11 @@ impl Prover for Sp1Prover {
         config: &ProverConfig,
         id_store: Option<&mut dyn IdWrite>,
     ) -> ProverResult<Proof> {
-        let param = Sp1Param::deserialize(config.get("sp1").unwrap()).unwrap();
+        let mut param = Sp1Param::deserialize(config.get("sp1").unwrap()).unwrap();
+
+        // TODO: remove param.recursion, hardcode to Compressed
+        param.recursion = RecursionMode::Compressed;
+
         let mode = param.prover.clone().unwrap_or_else(get_env_mock);
 
         println!("param: {param:?}");
@@ -198,8 +204,8 @@ impl Prover for Sp1Prover {
                     .await?;
             }
             info!(
-                "Sp1 Prover: block {:?} - proof id {proof_id:?}",
-                output.header.number
+                "Sp1: network proof id: {:?} for block {:?}",
+                proof_id, output.header.number
             );
             network_client
                 .wait_proof(proof_id.clone(), Some(Duration::from_secs(3600)))
@@ -288,7 +294,11 @@ impl Prover for Sp1Prover {
         config: &ProverConfig,
         _store: Option<&mut dyn IdWrite>,
     ) -> ProverResult<Proof> {
-        let param = Sp1Param::deserialize(config.get("sp1").unwrap()).unwrap();
+        let mut param = Sp1Param::deserialize(config.get("sp1").unwrap()).unwrap();
+
+        // TODO: remove param.recursion, hardcode to Plonk
+        param.recursion = RecursionMode::Plonk;
+
         let mode = param.prover.clone().unwrap_or_else(get_env_mock);
         let block_inputs: Vec<B256> = input
             .proofs
@@ -321,7 +331,7 @@ impl Prover for Sp1Prover {
                     stdin.write_proof(*block_proof, stark_vk.clone());
                 }
                 _ => {
-                    error!("unsupported proof type for aggregation: {sp1_proof:?}");
+                    tracing::error!("unsupported proof type for aggregation: {sp1_proof:?}");
                 }
             }
         }
@@ -389,6 +399,7 @@ impl Prover for Sp1Prover {
                 .map_err(|e| {
                     ProverError::GuestError(format!("Sp1: network proving failed: {e}"))
                 })?;
+            info!("Sp1: network proof id: {proof_id:?} for aggregation");
             network_client
                 .wait_proof(proof_id.clone(), Some(Duration::from_secs(3600)))
                 .await
@@ -432,12 +443,150 @@ impl Prover for Sp1Prover {
 
     async fn batch_run(
         &self,
-        _input: GuestBatchInput,
-        _output: &GuestBatchOutput,
-        _config: &ProverConfig,
-        _store: Option<&mut dyn IdWrite>,
+        input: GuestBatchInput,
+        output: &GuestBatchOutput,
+        config: &ProverConfig,
+        id_store: Option<&mut dyn IdWrite>,
     ) -> ProverResult<Proof> {
-        unimplemented!();
+        let mut param = Sp1Param::deserialize(config.get("sp1").unwrap()).unwrap();
+
+        // TODO: remove param.recursion, hardcode to Compressed
+        param.recursion = RecursionMode::Compressed;
+
+        let mode = param.prover.clone().unwrap_or_else(get_env_mock);
+
+        println!("batch_run param: {param:?}");
+        let mut stdin = SP1Stdin::new();
+        stdin.write(&input);
+
+        let Sp1ProverClient {
+            client,
+            pk,
+            vk,
+            network_client,
+        } = BATCH_PROOF_CLIENT
+            .entry(mode.clone())
+            .or_insert_with(|| {
+                let network_client = Arc::new(ProverClient::builder().network().build());
+                let base_client: Box<dyn Prover<CpuProverComponents>> = match mode {
+                    ProverMode::Mock => Box::new(ProverClient::builder().mock().build()),
+                    ProverMode::Local => Box::new(ProverClient::builder().cpu().build()),
+                    ProverMode::Network => Box::new(ProverClient::builder().network().build()),
+                };
+
+                let client = Arc::new(base_client);
+                let (pk, vk) = client.setup(BATCH_ELF);
+                info!(
+                    "new client and setup() for batch {:?}.",
+                    input.taiko.batch_id
+                );
+                Sp1ProverClient {
+                    client,
+                    network_client,
+                    pk,
+                    vk,
+                }
+            })
+            .clone();
+
+        info!(
+            "Sp1 Prover: batch {:?} with vk {:?}, output.hash: {}",
+            input.taiko.batch_id,
+            vk.bytes32(),
+            output.hash
+        );
+
+        let prove_result = if !matches!(mode, ProverMode::Network) {
+            debug!("Proving locally with recursion mode: {:?}", param.recursion);
+            let prove_mode = match param.recursion {
+                RecursionMode::Core => SP1ProofMode::Core,
+                RecursionMode::Compressed => SP1ProofMode::Compressed,
+                RecursionMode::Plonk => SP1ProofMode::Plonk,
+            };
+            client
+                .prove(&pk, &stdin, prove_mode)
+                .map_err(|e| ProverError::GuestError(format!("Sp1: local proving failed: {e}")))?
+        } else {
+            let proof_id = network_client
+                .prove(&pk, &stdin)
+                .mode(param.recursion.clone().into())
+                .cycle_limit(1_000_000_000_000)
+                .skip_simulation(true)
+                .strategy(FulfillmentStrategy::Reserved)
+                .request_async()
+                .await
+                .map_err(|e| {
+                    ProverError::GuestError(format!("Sp1: requesting proof failed: {e}"))
+                })?;
+            if let Some(id_store) = id_store {
+                id_store
+                    .store_id(
+                        (
+                            input.taiko.chain_spec.chain_id,
+                            input.taiko.batch_id,
+                            output.hash,
+                            ProofType::Sp1 as u8,
+                        ),
+                        proof_id.clone().to_string(),
+                    )
+                    .await?;
+            }
+            info!(
+                "Sp1 Prover: batch {:?} - proof id {proof_id:?}",
+                input.taiko.batch_id
+            );
+            network_client
+                .wait_proof(proof_id.clone(), Some(Duration::from_secs(3600)))
+                .await
+                .map_err(|e| ProverError::GuestError(format!("Sp1: network proof failed {e:?}")))?
+        };
+
+        let proof_bytes = match param.recursion {
+            RecursionMode::Compressed => {
+                info!("Compressed proof is used in aggregation mode only");
+                vec![]
+            }
+            _ => prove_result.bytes(),
+        };
+        if param.verify && !proof_bytes.is_empty() {
+            let time = Measurement::start("verify", false);
+            let pi_hash = prove_result
+                .clone()
+                .borrow_mut()
+                .public_values
+                .read::<[u8; 32]>();
+            let fixture = RaikoProofFixture {
+                vkey: vk.bytes32(),
+                public_values: B256::from_slice(&pi_hash).to_string(),
+                proof: proof_bytes.clone(),
+            };
+
+            verify_sol_by_contract_call(&fixture).await?;
+            time.stop_with("==> Verification complete");
+        }
+
+        let proof_string = (!proof_bytes.is_empty()).then_some(
+            // 0x + 32-byte vkey (64 hex chars) + the proof
+            // vkey itself contains the 0x prefix
+            format!(
+                "{}{}",
+                vk.bytes32(),
+                reth_primitives::hex::encode(proof_bytes)
+            ),
+        );
proof: {proof_string:?}", + input.taiko.batch_id, + ); + Ok::<_, ProverError>( + Sp1Response { + proof: proof_string, + sp1_proof: Some(prove_result), + vkey: Some(vk.clone()), + } + .into(), + ) } } diff --git a/provers/sp1/guest/Cargo.toml b/provers/sp1/guest/Cargo.toml index c8139dc23..43ec63a04 100644 --- a/provers/sp1/guest/Cargo.toml +++ b/provers/sp1/guest/Cargo.toml @@ -13,6 +13,10 @@ path = "src/zk_op.rs" name = "sp1-aggregation" path = "src/aggregation.rs" +[[bin]] +name = "sp1-batch" +path = "src/batch.rs" + [[bin]] name = "sha256" path = "src/benchmark/sha256.rs" diff --git a/provers/sp1/guest/elf/sp1-aggregation b/provers/sp1/guest/elf/sp1-aggregation index 10d1e8938..c3f4bcf34 100755 Binary files a/provers/sp1/guest/elf/sp1-aggregation and b/provers/sp1/guest/elf/sp1-aggregation differ diff --git a/provers/sp1/guest/elf/sp1-batch b/provers/sp1/guest/elf/sp1-batch new file mode 100755 index 000000000..cf195067b Binary files /dev/null and b/provers/sp1/guest/elf/sp1-batch differ diff --git a/provers/sp1/guest/elf/sp1-guest b/provers/sp1/guest/elf/sp1-guest index abe7f8c81..a7ec1636b 100755 Binary files a/provers/sp1/guest/elf/sp1-guest and b/provers/sp1/guest/elf/sp1-guest differ diff --git a/provers/sp1/guest/src/batch.rs b/provers/sp1/guest/src/batch.rs new file mode 100644 index 000000000..c5d7f9199 --- /dev/null +++ b/provers/sp1/guest/src/batch.rs @@ -0,0 +1,59 @@ +#![no_main] +sp1_zkvm::entrypoint!(main); + +use raiko_lib::{ + builder::calculate_batch_blocks_final_header, input::GuestBatchInput, proof_type::ProofType, + protocol_instance::ProtocolInstance, CycleTracker, +}; + +pub mod sys; +pub use sys::*; + +pub fn main() { + let mut ct = CycleTracker::start("input"); + let input = sp1_zkvm::io::read_vec(); + let batch_input = bincode::deserialize::(&input).unwrap(); + ct.end(); + + ct = CycleTracker::start("calculate_batch_blocks_final_header"); + let final_blocks = calculate_batch_blocks_final_header(&batch_input); + ct.end(); + + ct = CycleTracker::start("batch_instance_hash"); + let pi = ProtocolInstance::new_batch(&batch_input, final_blocks, ProofType::Sp1) + .unwrap() + .instance_hash(); + ct.end(); + + sp1_zkvm::io::commit(&pi.0); +} + +harness::zk_suits!( + pub mod tests { + use reth_primitives::U256; + use std::str::FromStr; + #[test] + pub fn test_build_from_mock_input() { + // Todo: impl mock input for static unit test + assert_eq!(1, 1); + } + pub fn test_signature() { + let signature = reth_primitives::Signature { + r: U256::from_str( + "18515461264373351373200002665853028612451056578545711640558177340181847433846", + ) + .unwrap(), + s: U256::from_str( + "46948507304638947509940763649030358759909902576025900602547168820602576006531", + ) + .unwrap(), + odd_y_parity: false, + }; + let hash = reth_primitives::B256::from_str( + "daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53", + ) + .unwrap(); + signature.recover_signer(hash).unwrap(); + } + } +); diff --git a/reqactor/src/backend.rs b/reqactor/src/backend.rs index f482b22ff..b8a7e948d 100644 --- a/reqactor/src/backend.rs +++ b/reqactor/src/backend.rs @@ -433,6 +433,17 @@ impl Backend { { let request_key_ = request_key.clone(); + let pool_status = self + .pool + .get_status(&request_key) + .unwrap() + .unwrap() + .into_status(); + if matches!(pool_status, Status::Success { .. } | Status::WorkInProgress) { + tracing::warn!("Actor Backend received prove-action {request_key}, but it is not registered, skipping"); + return; + } + // 1. 
diff --git a/reqactor/src/backend.rs b/reqactor/src/backend.rs
index f482b22ff..b8a7e948d 100644
--- a/reqactor/src/backend.rs
+++ b/reqactor/src/backend.rs
@@ -433,6 +433,17 @@ impl Backend {
     {
         let request_key_ = request_key.clone();
 
+        let pool_status = self
+            .pool
+            .get_status(&request_key)
+            .unwrap()
+            .unwrap()
+            .into_status();
+        if matches!(pool_status, Status::Success { .. } | Status::WorkInProgress) {
+            tracing::warn!("Actor Backend received prove-action {request_key}, but it is already completed or in progress, skipping");
+            return;
+        }
+
         // 1. Update the request status in pool to WorkInProgress
         if let Err(err) = self
             .pool
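The new guard makes duplicate prove-actions effectively idempotent: a request the pool already tracks as succeeded or running is skipped before its status is reset to `WorkInProgress`. A minimal sketch of the decision, with `Status` as in `raiko_reqpool`; other statuses (for example `Registered`) still fall through to the normal prove path:

```rust
fn should_skip(pool_status: &Status) -> bool {
    matches!(pool_status, Status::Success { .. } | Status::WorkInProgress)
}
```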