Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
0c53187
reset
jimjbrettj Nov 18, 2025
82cf49e
override slot in inherent
jimjbrettj Nov 19, 2025
6742b68
create digest provider from client
jimjbrettj Nov 19, 2025
7515f95
encode vs to_vec, clean
jimjbrettj Nov 20, 2025
d4fa12a
refactor timestamp, remove unused fork args
jimjbrettj Nov 20, 2025
32d36ef
remove comments from substrate runtime
jimjbrettj Nov 20, 2025
e6518ee
use next timestamp
jimjbrettj Nov 25, 2025
4f1944d
allow for flexible block times
jimjbrettj Nov 25, 2025
4fa6111
clean up
jimjbrettj Nov 26, 2025
b3c8ddc
remove consensus file
jimjbrettj Nov 26, 2025
e48f584
support no mine mode
jimjbrettj Nov 26, 2025
c1ab1b8
clean up and testing
jimjbrettj Nov 26, 2025
5511309
update backend errors
jimjbrettj Nov 26, 2025
29e5596
improved error messages
jimjbrettj Nov 26, 2025
5609022
add error handling to inherent data providers
jimjbrettj Dec 1, 2025
aea3182
update slot
jimjbrettj Dec 1, 2025
0db55ea
rebase wip/fix build
Dec 5, 2025
23acf05
temp/fix build
Dec 5, 2025
99d5bb0
fmt
Dec 5, 2025
d1b1186
fix non forking
Dec 5, 2025
3239e02
query runtime if chain_id not found
Dec 5, 2025
992c793
lock
Dec 8, 2025
f0a4f3e
get eth chain id rather than parachain id
Dec 8, 2025
bb9cf35
cache chain id rather than write to backend
Dec 9, 2025
81b1f5e
clean chain_id naming
Dec 10, 2025
e12d809
consistency with fork checkpoint
Dec 15, 2025
939288b
fix chain id method naming
Dec 15, 2025
bc0d0cb
support local nodes for subxt
Dec 15, 2025
94f332e
Initial integration tests using zombienet
dimartiro Dec 18, 2025
329d838
Add more integration tests using zombienet
dimartiro Jan 7, 2026
7a1773a
Fix clippy
dimartiro Jan 9, 2026
a33f365
Fix fmt
dimartiro Jan 9, 2026
c3695de
Put forking tests under a feature
dimartiro Jan 12, 2026
24362ff
Merge remote-tracking branch 'origin/feat/assethubForking' into asset…
dimartiro Jan 12, 2026
36b81f2
Fmt
dimartiro Jan 12, 2026
418408f
Merge pull request #20 from ChainSafe/assethub_zombienet-forking-inte…
dimartiro Jan 12, 2026
359b258
Merge upstream/feature/forking
dimartiro Jan 15, 2026
dad3ae4
Fix clippy
dimartiro Jan 15, 2026
c8f26e8
fix: convert https URLs to wss instead of ws for fork RPC
dimartiro Jan 16, 2026
9fda4f9
refactor: remove unnecessary thread spawn in service::new
dimartiro Jan 16, 2026
5e468c7
Improve forking tests comments
dimartiro Jan 16, 2026
c3f87b9
use e notation for ether amounts in tests
dimartiro Jan 16, 2026
2d17e48
remove unnecessary test
dimartiro Jan 16, 2026
4387aa7
Merge test_fork_can_deploy_contract_from_westend into test_fork_eth_g…
dimartiro Jan 16, 2026
5b04745
Add assert to check block number is increased
dimartiro Jan 22, 2026
463f69b
Remove unnecessary sleep
dimartiro Jan 22, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2,217 changes: 2,068 additions & 149 deletions Cargo.lock

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions crates/anvil-polkadot/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,10 @@ polkadot-sdk = { git = "https://github.com/paritytech/polkadot-sdk.git", branch
"substrate-frame-rpc-system",
"substrate-rpc-client",
"substrate-wasm-builder",

"cumulus-client-service",
"cumulus-client-parachain-inherent",
"polkadot-primitives",
] }
anvil.workspace = true
anvil-core.workspace = true
Expand Down Expand Up @@ -160,3 +164,4 @@ op-alloy-rpc-types.workspace = true
[features]
default = []
asm-keccak = ["alloy-primitives/asm-keccak"]
forking-tests = []
39 changes: 34 additions & 5 deletions crates/anvil-polkadot/src/api_server/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,8 @@ use polkadot_sdk::{
sc_service::{InPoolTransaction, SpawnTaskHandle, TransactionPool},
sp_api::{Metadata as _, ProvideRuntimeApi},
sp_blockchain::Info,
sp_consensus_aura::AuraApi,
sp_consensus_slots::Slot,
sp_core::{self, Hasher, keccak_256},
sp_runtime::{FixedU128, traits::BlakeTwo256},
};
Expand Down Expand Up @@ -110,6 +112,13 @@ pub struct ApiServer {
instance_id: B256,
/// Tracks all active filters
filters: Filters,
hardcoded_chain_id: u64,
}

/// Fetch the chain ID from the substrate chain.
///
/// Reads the `revive` pallet's `chain_id` runtime constant out of the
/// chain's metadata via the subxt client.
async fn chain_id_from_metadata(api: &OnlineClient<SrcChainConfig>) -> Result<u64> {
    // The EVM-compatible chain ID is exposed as a pallet constant.
    let address = subxt_client::constants().revive().chain_id();
    match api.constants().at(&address) {
        Ok(id) => Ok(id),
        Err(err) => Err(err.into()),
    }
}

impl ApiServer {
Expand Down Expand Up @@ -138,6 +147,16 @@ impl ApiServer {
)
.await?;

let backend = BackendWithOverlay::new(
substrate_service.backend.clone(),
substrate_service.storage_overrides.clone(),
);

// When forking we need to use the chain ID of the forked network, but for non-forking we do
// not want to use this as we allow for the chain_id to be customized. So we will
// not write this to the backend, but cache it to use if we are forking.
let chain_id = chain_id_from_metadata(&api).await?;

let filters_clone = filters.clone();
substrate_service.spawn_handle.spawn("filter-eviction-task", "None", async move {
eviction_task(filters_clone).await;
Expand All @@ -146,10 +165,7 @@ impl ApiServer {
block_provider,
req_receiver,
logging_manager,
backend: BackendWithOverlay::new(
substrate_service.backend.clone(),
substrate_service.storage_overrides.clone(),
),
backend,
client: substrate_service.client.clone(),
mining_engine: substrate_service.mining_engine.clone(),
eth_rpc_client,
Expand All @@ -159,6 +175,7 @@ impl ApiServer {
wallet: DevSigner::new(signers)?,
instance_id: B256::random(),
filters,
hardcoded_chain_id: chain_id,
})
}

Expand Down Expand Up @@ -589,6 +606,14 @@ impl ApiServer {
// Inject the new time if the timestamp precedes last block time
if time_ms < last_block_timestamp {
self.backend.inject_timestamp(latest_block, time_ms);
let current_aura_slot = self.backend.read_aura_current_slot(latest_block)?;
let updated_aura_slot = time_ms
.saturating_div(self.client.runtime_api().slot_duration(latest_block)?.as_millis());
if current_aura_slot > updated_aura_slot {
self.backend.inject_aura_current_slot(latest_block, Slot::from(updated_aura_slot));
self.backend
.inject_relay_slot_info(latest_block, (Slot::from(updated_aura_slot), 0));
}
}
Ok(self.mining_engine.set_time(Duration::from_secs(time)))
}
Expand All @@ -613,7 +638,11 @@ impl ApiServer {
}

/// Chain ID reported over Ethereum RPC for the block `at`.
fn chain_id(&self, at: Hash) -> u64 {
    let stored = self.backend.read_chain_id(at);
    // A missing value means we are forking: non-forking runs populate the
    // chain ID at genesis, so fall back to the ID cached from the forked
    // network's metadata.
    stored.unwrap_or(self.hardcoded_chain_id)
}

// Eth RPCs
Expand Down
1 change: 1 addition & 0 deletions crates/anvil-polkadot/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,7 @@ pub async fn spawn(
// Spawn the substrate node.
let (substrate_service, task_manager) =
substrate_node::service::new(&anvil_config, substrate_config)
.await
.map_err(sc_cli::Error::Service)?;
let revert_manager =
RevertManager::new(substrate_service.client.clone(), substrate_service.backend.clone());
Expand Down
76 changes: 76 additions & 0 deletions crates/anvil-polkadot/src/substrate_node/service/backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use polkadot_sdk::{
parachains_common::{AccountId, Hash, opaque::Block},
sc_client_api::{Backend as BackendT, StateBackend, TrieCacheContext},
sp_blockchain,
sp_consensus_slots::Slot,
sp_core::{H160, H256},
sp_io::hashing::blake2_256,
sp_runtime::FixedU128,
Expand All @@ -39,6 +40,12 @@ pub enum BackendError {
MissingNextFeeMultiplier,
#[error("Could not find block number in the state")]
MissingBlockNumber,
#[error("Could not find relay slot info in the state")]
MissingRelaySlotInfo,
#[error("Could not find last relay block number in the state")]
MissingLastRelayBlockNumber,
#[error("Could not find aura current slot in the state")]
MissingAuraCurrentSlot,
#[error("Unable to decode total issuance {0}")]
DecodeTotalIssuance(codec::Error),
#[error("Unable to decode chain id {0}")]
Expand All @@ -59,6 +66,12 @@ pub enum BackendError {
DecodeAuraAuthorities(codec::Error),
#[error("Unable to decode the next fee multiplier: {0}")]
DecodeNextFeeMultiplier(codec::Error),
#[error("Unable to decode relay slot info: {0}")]
DecodeRelaySlotInfo(codec::Error),
#[error("Unable to decode last relay block number: {0}")]
DecodeLastRelayBlockNumber(codec::Error),
#[error("Unable to decode aura current slot: {0}")]
DecodeAuraCurrentSlot(codec::Error),
}

type Result<T> = std::result::Result<T, BackendError>;
Expand All @@ -85,6 +98,31 @@ impl BackendWithOverlay {
u64::decode(&mut &value[..]).map_err(BackendError::DecodeTimestamp)
}

/// Read the relay slot info — a `(slot, authored-block-count)` pair — from
/// the top-level state at `hash`.
pub fn read_relay_slot_info(&self, hash: Hash) -> Result<(Slot, u32)> {
    let raw = self
        .read_top_state(hash, well_known_keys::RELAY_SLOT_INFO.to_vec())?
        .ok_or(BackendError::MissingRelaySlotInfo)?;
    // SCALE-decode the stored bytes; a malformed value is a distinct error.
    <(Slot, u32)>::decode(&mut &raw[..]).map_err(BackendError::DecodeRelaySlotInfo)
}

/// Read the last relay chain block number from the top-level state at `hash`.
pub fn read_last_relay_chain_block_number(&self, hash: Hash) -> Result<u32> {
    let raw = self
        .read_top_state(hash, well_known_keys::LAST_RELAY_CHAIN_BLOCK_NUMBER.to_vec())?
        .ok_or(BackendError::MissingLastRelayBlockNumber)?;
    // SCALE-decode the stored bytes; a malformed value is a distinct error.
    u32::decode(&mut &raw[..]).map_err(BackendError::DecodeLastRelayBlockNumber)
}

/// Read Aura's current slot from the top-level state at `hash`.
pub fn read_aura_current_slot(&self, hash: Hash) -> Result<Slot> {
    let raw = self
        .read_top_state(hash, well_known_keys::CURRENT_SLOT.to_vec())?
        .ok_or(BackendError::MissingAuraCurrentSlot)?;
    // SCALE-decode the stored bytes; a malformed value is a distinct error.
    Slot::decode(&mut &raw[..]).map_err(BackendError::DecodeAuraCurrentSlot)
}

pub fn read_block_number(&self, hash: Hash) -> Result<u32> {
let key = well_known_keys::BLOCK_NUMBER_KEY;
let value =
Expand Down Expand Up @@ -174,6 +212,21 @@ impl BackendWithOverlay {
overrides.set_timestamp(at, timestamp);
}

/// Override the relay slot info at block `at` so subsequent state reads
/// observe `slot_info` instead of the on-chain value.
pub fn inject_relay_slot_info(&self, at: Hash, slot_info: (Slot, u32)) {
    // The lock guard is a temporary and is released at the end of the statement.
    self.overrides.lock().set_relay_slot_info(at, slot_info);
}

/// Override the last relay chain block number at block `at` so subsequent
/// state reads observe `number` instead of the on-chain value.
pub fn inject_last_relay_chain_block_number(&self, at: Hash, number: u32) {
    // The lock guard is a temporary and is released at the end of the statement.
    self.overrides.lock().set_last_relay_chain_block_number(at, number);
}

/// Override Aura's current slot at block `at` so subsequent state reads
/// observe `slot` instead of the on-chain value.
pub fn inject_aura_current_slot(&self, at: Hash, slot: Slot) {
    // The lock guard is a temporary and is released at the end of the statement.
    self.overrides.lock().set_aura_current_slot(at, slot);
}

pub fn inject_chain_id(&self, at: Hash, chain_id: u64) {
let mut overrides = self.overrides.lock();
overrides.set_chain_id(at, chain_id);
Expand Down Expand Up @@ -273,6 +326,29 @@ impl StorageOverrides {
self.add(latest_block, changeset);
}

/// Record a storage override mapping the relay-slot-info key to the
/// SCALE-encoded `slot_info` for `latest_block`.
fn set_relay_slot_info(&mut self, latest_block: Hash, slot_info: (Slot, u32)) {
    let mut overrides = BlockOverrides::default();
    overrides
        .top
        .insert(well_known_keys::RELAY_SLOT_INFO.to_vec(), Some(slot_info.encode()));
    self.add(latest_block, overrides);
}

/// Record a storage override mapping the last-relay-block-number key to the
/// SCALE-encoded `number` for `latest_block`.
fn set_last_relay_chain_block_number(&mut self, latest_block: Hash, number: u32) {
    let mut overrides = BlockOverrides::default();
    overrides.top.insert(
        well_known_keys::LAST_RELAY_CHAIN_BLOCK_NUMBER.to_vec(),
        Some(number.encode()),
    );
    self.add(latest_block, overrides);
}

/// Record a storage override mapping Aura's current-slot key to the
/// SCALE-encoded `slot` for `latest_block`.
fn set_aura_current_slot(&mut self, latest_block: Hash, slot: Slot) {
    let mut overrides = BlockOverrides::default();
    overrides
        .top
        .insert(well_known_keys::CURRENT_SLOT.to_vec(), Some(slot.encode()));
    self.add(latest_block, overrides);
}

fn set_coinbase(&mut self, latest_block: Hash, aura_authority: AccountId) {
let mut changeset = BlockOverrides::default();
changeset.top.insert(
Expand Down
90 changes: 15 additions & 75 deletions crates/anvil-polkadot/src/substrate_node/service/client.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use crate::{
AnvilNodeConfig,
config::ForkChoice,
substrate_node::{
genesis::DevelopmentGenesisBlockBuilder,
lazy_loading::{
Expand All @@ -25,10 +24,7 @@ use polkadot_sdk::{
sp_blockchain,
sp_core::storage::well_known_keys::CODE,
sp_keystore::KeystorePtr,
sp_runtime::{
generic::SignedBlock,
traits::{Block as BlockT, Header as HeaderT},
},
sp_runtime::generic::SignedBlock,
sp_storage::StorageKey,
};
use std::{collections::HashMap, sync::Arc, time::Duration};
Expand All @@ -41,10 +37,11 @@ pub fn new_client(
config: &mut sc_service::Configuration,
executor: WasmExecutor,
storage_overrides: Arc<Mutex<StorageOverrides>>,
genesis_num: u64,
) -> Result<(Arc<Client>, Arc<Backend>, KeystorePtr, TaskManager), sc_service::error::Error> {
let fork_config: Option<(Arc<dyn RPCClient<Block>>, Block)> =
if let Some(fork_url) = &anvil_config.eth_rpc_url {
let (rpc_client, checkpoint_block) = setup_fork(anvil_config, config, fork_url)?;
let (rpc_client, checkpoint_block) = setup_fork(config, fork_url, genesis_num)?;
Some((rpc_client, checkpoint_block))
} else {
None
Expand Down Expand Up @@ -125,50 +122,11 @@ pub fn new_client(
Ok((Arc::new(client), backend, keystore_container.keystore(), task_manager))
}

/// Resolves the block number to fork from, handling both positive and negative block numbers.
/// Negative numbers are subtracted from the latest block number.
fn resolve_fork_block_number(
rpc_client: &Rpc<Block>,
fork_choice: &ForkChoice,
) -> Result<u32, sp_blockchain::Error> {
match fork_choice {
ForkChoice::Block(block_number) => {
if *block_number < 0 {
// Get the latest block from the chain header
let latest_header = rpc_client
.header(None)
.map_err(|e| {
sp_blockchain::Error::Backend(format!("failed to fetch latest header: {e}"))
})?
.ok_or_else(|| {
sp_blockchain::Error::Backend("latest header not found".into())
})?;

let latest_number: u32 = *latest_header.number();

let offset: u32 = block_number.abs().try_into().map_err(|_| {
sp_blockchain::Error::Backend(format!(
"Block number offset too large: {block_number}"
))
})?;

Ok(latest_number.saturating_sub(offset))
} else {
(*block_number).try_into().map_err(|_| {
sp_blockchain::Error::Backend(format!(
"Invalid fork block number: {block_number}"
))
})
}
}
}
}

/// Fetches the checkpoint block and sets up the chain spec for forking
fn setup_fork(
anvil_config: &AnvilNodeConfig,
config: &mut sc_service::Configuration,
fork_url: &str,
genesis_num: u64,
) -> Result<(Arc<dyn RPCClient<Block>>, Block), sc_service::error::Error> {
let http_client = jsonrpsee::http_client::HttpClientBuilder::default()
.max_request_size(u32::MAX)
Expand All @@ -188,35 +146,17 @@ fn setup_fork(
sp_blockchain::Error::Backend(format!("failed to fetch system_properties: {e}"))
})?;

// Get block hash from fork_choice config
// If no fork_choice is specified, we need to fetch the latest block and use its hash
// for all subsequent requests to avoid inconsistencies if a new block is mined between calls.
let block_hash: <Block as BlockT>::Hash = if let Some(fork_choice) = &anvil_config.fork_choice {
let block_num = resolve_fork_block_number(&rpc_client, fork_choice)?;
rpc_client
.block_hash(Some(block_num))
.map_err(|e| {
sp_blockchain::Error::Backend(format!(
"failed to fetch block hash for block {block_num}: {e}"
))
})?
.ok_or_else(|| {
sp_blockchain::Error::Backend(format!("block hash not found for block {block_num}"))
})?
} else {
// No fork_choice specified, fetch the latest block header and use its hash
let latest_header = rpc_client
.header(None)
.map_err(|e| {
sp_blockchain::Error::Backend(format!(
"failed to fetch latest header for fork: {e}"
))
})?
.ok_or_else(|| {
sp_blockchain::Error::Backend("latest header not found for fork".into())
})?;
latest_header.hash()
};
let block_num = genesis_num as u32;
let block_hash = rpc_client
.block_hash(Some(block_num))
.map_err(|e| {
sp_blockchain::Error::Backend(format!(
"failed to fetch block hash for block {block_num}: {e}"
))
})?
.ok_or_else(|| {
sp_blockchain::Error::Backend(format!("block hash not found for block {block_num}"))
})?;

let wasm_binary = rpc_client
.storage(StorageKey(CODE.to_vec()), Some(block_hash))
Expand Down
Loading
Loading