From a3e1acfab670e768117f3d1e5a433e3f489f6c41 Mon Sep 17 00:00:00 2001
From: kianenigma
Date: Sun, 19 Oct 2025 12:19:10 +0100
Subject: [PATCH 1/2] round of code

---
 src/commands/dry_run/mod.rs | 122 ++++++++++++++++++++++++++++++++++++
 src/commands/mod.rs         |   1 +
 src/commands/types.rs       |  32 +++++++++-
 src/main.rs                 |  17 +++++
 4 files changed, 170 insertions(+), 2 deletions(-)
 create mode 100644 src/commands/dry_run/mod.rs

diff --git a/src/commands/dry_run/mod.rs b/src/commands/dry_run/mod.rs
new file mode 100644
index 000000000..3b2253544
--- /dev/null
+++ b/src/commands/dry_run/mod.rs
@@ -0,0 +1,122 @@
+//! Dry run commands for testing and simulation.
+
+use crate::{
+    client::Client,
+    commands::multi_block::types::Snapshot,
+    dynamic::multi_block as dynamic,
+    error::Error,
+    prelude::{AccountId, LOG_TARGET},
+    runtime::multi_block as runtime,
+    static_types::multi_block as static_types,
+    utils,
+};
+use polkadot_sdk::pallet_election_provider_multi_block::unsigned::miner::MinerConfig;
+
+/// Run a dry run at a specific block with a snapshot.
+pub async fn at_block_with_snapshot<T>(client: Client, block_hash_str: String) -> Result<(), Error>
+where
+    T: MinerConfig + Send + Sync + 'static,
+    T::Solution: Send + Sync + 'static,
+    T::Pages: Send + Sync + 'static,
+    T::TargetSnapshotPerBlock: Send + Sync + 'static,
+    T::VoterSnapshotPerBlock: Send + Sync + 'static,
+    T::MaxVotesPerVoter: Send + Sync + 'static,
+{
+    log::info!(target: LOG_TARGET, "Running dry run at block {}", block_hash_str);
+
+    // Parse the block hash
+    let block_hash: polkadot_sdk::sp_core::H256 =
+        block_hash_str.parse().expect("Failed to parse block hash");
+
+    log::info!(target: LOG_TARGET, "genesis = {:?}, runtime = {:?}", client.chain_api().genesis_hash(), client.chain_api().runtime_version());
+    // Get storage at the specified block
+    log::info!(target: LOG_TARGET, "Fetching storage at block {}", block_hash);
+    let storage = utils::storage_at(Some(block_hash), client.chain_api())
+        .await
+        .expect("Failed to get storage at block");
+
+    // Get the round number at this block
+    let round = storage
+        .fetch_or_default(&runtime::storage().multi_block_election().round())
+        .await
+        .expect("Round number not found in storage at the specified block");
+
+    log::info!(target: LOG_TARGET, "Block round: {}", round);
+
+    // Get desired targets
+    let desired_targets = storage
+        .fetch(&runtime::storage().multi_block_election().desired_targets(round))
+        .await
+        .expect("Failed to fetch desired targets")
+        .unwrap_or(0);
+
+    log::info!(target: LOG_TARGET, "Desired targets: {}", desired_targets);
+
+    // Get number of pages
+    let n_pages = static_types::Pages::get();
+
+    log::info!(target: LOG_TARGET, "Number of pages: {}", n_pages);
+
+    // Create a snapshot and fetch all the data
+    let mut snapshot = Snapshot::<T>::new(n_pages);
+
+    log::info!(target: LOG_TARGET, "Fetching snapshots for round {}...", round);
+    dynamic::fetch_missing_snapshots::<T>(&mut snapshot, &storage, round)
+        .await
+        .expect("Failed to fetch missing snapshots");
+
+    let (target_snapshot, voter_snapshot) = snapshot.get();
+
+    log::info!(
+        target: LOG_TARGET,
+        "Snapshots fetched - targets: {}, voters across {} pages",
+        target_snapshot.len(),
+        voter_snapshot.len()
+    );
+
+    // Mine the solution
+    log::info!(target: LOG_TARGET, "Mining solution...");
+    let paged_raw_solution = dynamic::mine_solution::<T>(
+        target_snapshot,
+        voter_snapshot,
+        n_pages,
+        round,
+        desired_targets,
+        0,    // block_number doesn't matter for dry run
+        true, // do_reduce
+    )
+    .await
+    .expect("Failed to mine solution");
+
+    // Print the results
+    println!("\n========== DRY RUN RESULTS ==========");
+    println!("Block Hash: {}", block_hash_str);
+    println!("Round: {}", round);
+    println!("Desired Targets: {}", desired_targets);
+    println!("Number of Pages: {}", n_pages);
+    println!("\nSolution Score:");
+    println!(" Minimal Stake: {}", paged_raw_solution.score.minimal_stake);
+    println!(" Sum Stake: {}", paged_raw_solution.score.sum_stake);
+    println!(" Sum Stake Squared: {}", paged_raw_solution.score.sum_stake_squared);
+    println!("\nSolution Pages: {}", paged_raw_solution.solution_pages.len());
+    println!("Winner Count: {}", paged_raw_solution.winner_count_single_page_target_snapshot());
+    println!("=====================================\n");
+
+    log::info!(target: LOG_TARGET, "Dry run completed successfully");
+    Ok(())
+}
+
+/// Run a dry run with the current snapshot.
+pub async fn with_current_snapshot<T>(_client: Client) -> Result<(), Error>
+where
+    T: MinerConfig + Send + Sync + 'static,
+    T::Solution: Send + Sync + 'static,
+    T::Pages: Send + Sync + 'static,
+    T::TargetSnapshotPerBlock: Send + Sync + 'static,
+    T::VoterSnapshotPerBlock: Send + Sync + 'static,
+    T::MaxVotesPerVoter: Send + Sync + 'static,
+{
+    // TODO: Implementation to be added
+    log::info!("with_current_snapshot: not yet implemented");
+    Ok(())
+}
diff --git a/src/commands/mod.rs b/src/commands/mod.rs
index 0f6fc2f08..2564c5915 100644
--- a/src/commands/mod.rs
+++ b/src/commands/mod.rs
@@ -1,4 +1,5 @@
 //! Supported commands for the polkadot-staking-miner and related types.
+pub mod dry_run;
 pub mod multi_block;
 pub mod types;
 
diff --git a/src/commands/types.rs b/src/commands/types.rs
index debdb9124..b106f6a19 100644
--- a/src/commands/types.rs
+++ b/src/commands/types.rs
@@ -13,8 +13,8 @@ pub enum SubmissionStrategy {
     IfLeading,
     /// Submit if we are no worse than `Perbill` worse than the best.
     ClaimNoWorseThan(Perbill),
-    /// Submit if we are leading, or if the solution that's leading is more that the given `Perbill`
-    /// better than us. This helps detect obviously fake solutions and still combat them.
+    /// Submit if we are leading, or if the solution that's leading is more than the given
+    /// `Perbill` better than us. This helps detect obviously fake solutions and still combat them.
     ClaimBetterThan(Perbill),
 }
 
@@ -79,3 +79,31 @@ pub struct MultiBlockMonitorConfig {
     #[clap(long, default_value_t = false, hide = true)]
     pub shady: bool,
 }
+
+/// Configuration for dry run commands.
+#[derive(Debug, Clone, clap::Parser)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct DryRunConfig {
+    #[clap(subcommand)]
+    pub subcommand: DryRunSubcommand,
+}
+
+/// Dry run subcommands.
+#[derive(Debug, Clone, clap::Parser)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum DryRunSubcommand {
+    /// Run a dry run at a specific block with a snapshot.
+    ///
+    /// Useful to re-run previous elections. The input block hash needs to be a block in which
+    /// an election snapshot exists.
+    AtBlockWithSnapshot {
+        /// The block hash to read the snapshot from.
+        #[clap(long)]
+        block_hash: String,
+    },
+    /// Run a dry run with the current snapshot.
+    ///
+    /// This will force-create the snapshot, based on best-effort logic, and then run the dry run
+    /// using that snapshot.
+    WithCurrentSnapshot,
+}
diff --git a/src/main.rs b/src/main.rs
index 2b492b51e..50589e9d1 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -85,6 +85,8 @@ pub enum Command {
     Monitor(commands::types::MultiBlockMonitorConfig),
     /// Check if the staking-miner metadata is compatible to a remote node.
     Info,
+    /// Dry run commands for testing and simulation.
+    DryRun(commands::types::DryRunConfig),
 }
 
 #[tokio::main]
@@ -146,6 +148,21 @@ async fn main() -> Result<(), Error> {
                 commands::multi_block::monitor_cmd::(client, cfg).boxed()
             })
         },
+        Command::DryRun(cfg) => {
+            use commands::types::DryRunSubcommand;
+            match cfg.subcommand {
+                DryRunSubcommand::AtBlockWithSnapshot { block_hash } => {
+                    macros::for_multi_block_runtime!(chain, {
+                        commands::dry_run::at_block_with_snapshot::(client, block_hash).boxed()
+                    })
+                },
+                DryRunSubcommand::WithCurrentSnapshot => {
+                    macros::for_multi_block_runtime!(chain, {
+                        commands::dry_run::with_current_snapshot::(client).boxed()
+                    })
+                },
+            }
+        },
     };
 
     let res = run_command(fut, rx_upgrade).await;

From d1da9cc635e6540b922ccbee61e01102d6679021 Mon Sep 17 00:00:00 2001
From: kianenigma
Date: Mon, 20 Oct 2025 13:14:53 +0100
Subject: [PATCH 2/2] more work towards dry-run

---
 src/commands/dry_run/mod.rs | 55 +++++++++++++++++++++++++++++++------
 src/main.rs                 |  2 +-
 2 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/src/commands/dry_run/mod.rs b/src/commands/dry_run/mod.rs
index 3b2253544..86f1de816 100644
--- a/src/commands/dry_run/mod.rs
+++ b/src/commands/dry_run/mod.rs
@@ -5,15 +5,49 @@ use crate::{
     commands::multi_block::types::Snapshot,
     dynamic::multi_block as dynamic,
     error::Error,
-    prelude::{AccountId, LOG_TARGET},
+    prelude::{AccountId, ChainClient, Config, LOG_TARGET},
     runtime::multi_block as runtime,
     static_types::multi_block as static_types,
    utils,
 };
 use polkadot_sdk::pallet_election_provider_multi_block::unsigned::miner::MinerConfig;
+use std::{sync::Arc, time::Duration};
+use subxt::backend::{
+    legacy::LegacyBackend,
+    rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient as ReconnectingRpcClient},
+};
+
+/// Helper function to create a client with a Legacy backend for historical block queries.
+async fn create_legacy_client(uri: &str) -> Result<ChainClient, Error> {
+    log::debug!(target: LOG_TARGET, "Creating Legacy backend client for historical queries");
+
+    // Create a reconnecting RPC client with exponential backoff
+    let reconnecting_rpc = ReconnectingRpcClient::builder()
+        .retry_policy(
+            ExponentialBackoff::from_millis(500)
+                .max_delay(Duration::from_secs(30))
+                .take(10), // Allow up to 10 retry attempts before giving up
+        )
+        .build(uri.to_string())
+        .await
+        .map_err(|e| Error::Other(format!("Failed to connect: {e:?}")))?;
+
+    let backend: LegacyBackend<Config> = LegacyBackend::builder().build(reconnecting_rpc);
+    let chain_api = ChainClient::from_backend(Arc::new(backend))
+        .await
+        .map_err(|e| Error::Other(format!("Failed to create client: {e:?}")))?;
+
+    log::info!(target: LOG_TARGET, "Connected with Legacy backend for historical block queries");
+
+    Ok(chain_api)
+}
 
 /// Run a dry run at a specific block with a snapshot.
-pub async fn at_block_with_snapshot<T>(client: Client, block_hash_str: String) -> Result<(), Error>
+pub async fn at_block_with_snapshot<T>(
+    _client: Client,
+    uri: String,
+    block_hash_str: String,
+) -> Result<(), Error>
 where
     T: MinerConfig + Send + Sync + 'static,
     T::Solution: Send + Sync + 'static,
     T::Pages: Send + Sync + 'static,
     T::TargetSnapshotPerBlock: Send + Sync + 'static,
     T::VoterSnapshotPerBlock: Send + Sync + 'static,
     T::MaxVotesPerVoter: Send + Sync + 'static,
@@ -28,10 +62,13 @@ where
     log::info!(target: LOG_TARGET, "Running dry run at block {}", block_hash_str);
 
     // Parse the block hash
     let block_hash: polkadot_sdk::sp_core::H256 =
         block_hash_str.parse().expect("Failed to parse block hash");
 
-    log::info!(target: LOG_TARGET, "genesis = {:?}, runtime = {:?}", client.chain_api().genesis_hash(), client.chain_api().runtime_version());
-    // Get storage at the specified block
-    log::info!(target: LOG_TARGET, "Fetching storage at block {}", block_hash);
-    let storage = utils::storage_at(Some(block_hash), client.chain_api())
+    // Create a legacy backend client for historical block queries
+    let legacy_client = create_legacy_client(&uri).await.expect("Failed to create legacy client");
+
+    log::info!(target: LOG_TARGET, "genesis = {:?}, runtime = {:?}", legacy_client.genesis_hash(), legacy_client.runtime_version());
+
+    // Get storage at the specified block using the legacy client
+    let storage = utils::storage_at(Some(block_hash), &legacy_client)
         .await
         .expect("Failed to get storage at block");
 
@@ -69,9 +106,11 @@ where
 
     log::info!(
         target: LOG_TARGET,
-        "Snapshots fetched - targets: {}, voters across {} pages",
+        "Snapshots fetched - targets: {}, voter pages: {}, total voters: {} (per page: {:?})",
         target_snapshot.len(),
-        voter_snapshot.len()
+        voter_snapshot.len(),
+        voter_snapshot.iter().fold(0, |acc, p| acc + p.len()),
+        voter_snapshot.iter().map(|p| p.len()).collect::<Vec<_>>()
     );
 
     // Mine the solution
diff --git a/src/main.rs b/src/main.rs
index 50589e9d1..24db12a03 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -153,7 +153,7 @@ async fn main() -> Result<(), Error> {
             match cfg.subcommand {
                 DryRunSubcommand::AtBlockWithSnapshot { block_hash } => {
                     macros::for_multi_block_runtime!(chain, {
-                        commands::dry_run::at_block_with_snapshot::(client, block_hash).boxed()
+                        commands::dry_run::at_block_with_snapshot::(client, uri.clone(), block_hash).boxed()
                     })
                 },
                 DryRunSubcommand::WithCurrentSnapshot => {