(#[case] _p: PhantomData) {}
+
#[derive(Clone, Debug, Default)]
struct EventCollector {
events: Arc>>,
@@ -145,8 +143,8 @@ mod persistence_tests {
}
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_voted_view() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_voted_view(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -188,8 +186,8 @@ mod persistence_tests {
);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_restart_view() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_restart_view(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -250,8 +248,9 @@ mod persistence_tests {
view2 + 1
);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_store_drb_input() {
+
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_store_drb_input(_p: PhantomData
) {
use hotshot_types::drb::DrbInput;
setup_test();
@@ -295,8 +294,8 @@ mod persistence_tests {
assert_eq!(storage.load_drb_input(10).await.unwrap(), drb_input_3);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_epoch_info() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_epoch_info(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
let storage = P::connect(&tmp).await;
@@ -408,8 +407,8 @@ mod persistence_tests {
}
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_append_and_decide() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_append_and_decide(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -789,8 +788,8 @@ mod persistence_tests {
);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_upgrade_certificate() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_upgrade_certificate(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -838,8 +837,8 @@ mod persistence_tests {
assert_eq!(view_number, new_view_number_for_certificate);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_next_epoch_quorum_certificate() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_next_epoch_quorum_certificate(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -900,8 +899,10 @@ mod persistence_tests {
assert_eq!(view_number, new_view_number_for_qc);
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_decide_with_failing_event_consumer() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_decide_with_failing_event_consumer(
+ _p: PhantomData
,
+ ) {
#[derive(Clone, Copy, Debug)]
struct FailConsumer;
@@ -1119,8 +1120,8 @@ mod persistence_tests {
}
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_pruning() {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_pruning(_p: PhantomData
) {
setup_test();
let tmp = P::tmp_storage().await;
@@ -1300,8 +1301,11 @@ mod persistence_tests {
// test for validating stake table event fetching from persistence,
// ensuring that persisted data matches the on-chain events and that event fetcher work correctly.
- #[tokio::test(flavor = "multi_thread")]
+ #[rstest_reuse::apply(persistence_types)]
pub async fn test_stake_table_fetching_from_persistence(
+ #[values(StakeTableContractVersion::V1, StakeTableContractVersion::V2)]
+ stake_table_version: StakeTableContractVersion,
+ _p: PhantomData
,
) -> anyhow::Result<()> {
setup_test();
@@ -1336,7 +1340,7 @@ mod persistence_tests {
.api_config(query_api_options)
.network_config(network_config.clone())
.persistences(persistence_options.clone())
- .pos_hook::(DelegationConfig::MultipleDelegators)
+ .pos_hook::(DelegationConfig::MultipleDelegators, stake_table_version)
.await
.expect("Pos deployment failed")
.build();
@@ -1425,9 +1429,15 @@ mod persistence_tests {
Ok(())
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_stake_table_background_fetching() -> anyhow::Result<()>
- {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_stake_table_background_fetching(
+ #[values(StakeTableContractVersion::V1, StakeTableContractVersion::V2)]
+ stake_table_version: StakeTableContractVersion,
+ _p: PhantomData
,
+ ) -> anyhow::Result<()> {
+ use espresso_types::v0_3::ChainConfig;
+ use hotshot_contract_adapter::stake_table::StakeTableContractVersion;
+
setup_test();
let blocks_per_epoch = 10;
@@ -1467,16 +1477,19 @@ mod persistence_tests {
.epoch_start_block(1)
.build()
.unwrap();
- args.deploy_all(&mut contracts)
- .await
- .expect("failed to deploy all contracts");
+
+ match stake_table_version {
+ StakeTableContractVersion::V1 => args.deploy_to_stake_table_v1(&mut contracts).await,
+ StakeTableContractVersion::V2 => args.deploy_all(&mut contracts).await,
+ }
+ .expect("contracts deployed");
let st_addr = contracts
.address(Contract::StakeTableProxy)
- .expect("StakeTableProxy address not found");
+ .expect("StakeTableProxy deployed");
let token_addr = contracts
.address(Contract::EspTokenProxy)
- .expect("EspTokenProxy address not found");
+ .expect("EspTokenProxy deployed");
let l1_url = network_config.l1_url().clone();
// new block every 1s
@@ -1575,8 +1588,10 @@ mod persistence_tests {
Ok(())
}
- #[tokio::test(flavor = "multi_thread")]
- pub async fn test_membership_persistence() -> anyhow::Result<()> {
+ #[rstest_reuse::apply(persistence_types)]
+ pub async fn test_membership_persistence(
+ _p: PhantomData
,
+ ) -> anyhow::Result<()> {
setup_test();
let tmp = P::tmp_storage().await;
diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs
index 5cd91eedb8..cf406be546 100644
--- a/sequencer/src/persistence/fs.rs
+++ b/sequencer/src/persistence/fs.rs
@@ -1886,36 +1886,6 @@ fn epoch_files(
}))
}
-#[cfg(test)]
-mod testing {
- use tempfile::TempDir;
-
- use super::{super::testing::TestablePersistence, *};
-
- #[async_trait]
- impl TestablePersistence for Persistence {
- type Storage = TempDir;
-
- async fn tmp_storage() -> Self::Storage {
- TempDir::new().unwrap()
- }
-
- fn options(storage: &Self::Storage) -> impl PersistenceOptions {
- Options::new(storage.path().into())
- }
- }
-}
-
-#[cfg(test)]
-mod generic_tests {
- use super::{super::persistence_tests, Persistence};
- // For some reason this is the only way to import the macro defined in another module of this
- // crate.
- use crate::*;
-
- instantiate_persistence_tests!(Persistence);
-}
-
#[cfg(test)]
mod test {
use std::marker::PhantomData;
@@ -1936,10 +1906,24 @@ mod test {
use jf_vid::VidScheme;
use sequencer_utils::test_utils::setup_test;
use serde_json::json;
+ use tempfile::TempDir;
use vbs::version::StaticVersionType;
use super::*;
- use crate::{persistence::testing::TestablePersistence, BLSPubKey};
+ use crate::{persistence::tests::TestablePersistence, BLSPubKey};
+
+ #[async_trait]
+ impl TestablePersistence for Persistence {
+ type Storage = TempDir;
+
+ async fn tmp_storage() -> Self::Storage {
+ TempDir::new().unwrap()
+ }
+
+ fn options(storage: &Self::Storage) -> impl PersistenceOptions {
+ Options::new(storage.path().into())
+ }
+ }
#[test]
fn test_config_migrations_add_builder_urls() {
diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs
index c476ed75e7..b050f51d12 100644
--- a/sequencer/src/persistence/sql.rs
+++ b/sequencer/src/persistence/sql.rs
@@ -2520,7 +2520,8 @@ async fn fetch_leaf_from_proposals(
mod testing {
use hotshot_query_service::data_source::storage::sql::testing::TmpDb;
- use super::{super::testing::TestablePersistence, *};
+ use super::*;
+ use crate::persistence::tests::TestablePersistence;
#[async_trait]
impl TestablePersistence for Persistence {
@@ -2555,16 +2556,6 @@ mod testing {
}
}
-#[cfg(test)]
-mod generic_tests {
- use super::{super::persistence_tests, Persistence};
- // For some reason this is the only way to import the macro defined in another module of this
- // crate.
- use crate::*;
-
- instantiate_persistence_tests!(Persistence);
-}
-
#[cfg(test)]
mod test {
@@ -2595,7 +2586,7 @@ mod test {
use vbs::version::StaticVersionType;
use super::*;
- use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey};
+ use crate::{persistence::tests::TestablePersistence as _, BLSPubKey, PubKey};
#[tokio::test(flavor = "multi_thread")]
async fn test_quorum_proposals_leaf_hash_migration() {
diff --git a/staking-cli/Cargo.toml b/staking-cli/Cargo.toml
index a13b2ed56f..ced6e166b8 100644
--- a/staking-cli/Cargo.toml
+++ b/staking-cli/Cargo.toml
@@ -8,8 +8,7 @@ description = "A CLI to interact with the Espresso stake table contract"
[dependencies]
alloy = { workspace = true }
anyhow = { workspace = true }
-ark-ec = { workspace = true }
-ark-ed-on-bn254 = { workspace = true }
+ark-serialize = { workspace = true }
clap = { workspace = true }
clap-serde = "0.5.1"
clap-serde-derive = "0.2.1"
@@ -27,6 +26,8 @@ jf-signature = { workspace = true, features = ["bls", "schnorr"] }
portpicker = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
+rstest = { workspace = true }
+rstest_reuse = { workspace = true }
rust_decimal = "1.36.0"
sequencer-utils = { version = "0.1.0", path = "../utils" }
serde = { workspace = true }
diff --git a/staking-cli/src/demo.rs b/staking-cli/src/demo.rs
index b013dd7cfc..8e79a18739 100644
--- a/staking-cli/src/demo.rs
+++ b/staking-cli/src/demo.rs
@@ -143,7 +143,7 @@ pub async fn setup_stake_table_contract_for_test(
commission,
validator_address,
bls_key_pair,
- state_key_pair.ver_key(),
+ state_key_pair,
)
.await?;
assert!(receipt.status());
diff --git a/staking-cli/src/deploy.rs b/staking-cli/src/deploy.rs
index 23d29c16fe..946fd0627d 100644
--- a/staking-cli/src/deploy.rs
+++ b/staking-cli/src/deploy.rs
@@ -14,10 +14,20 @@ use alloy::{
signers::local::PrivateKeySigner,
};
use anyhow::Result;
-use espresso_contract_deployer::build_signer;
-use hotshot_contract_adapter::sol_types::{ERC1967Proxy, EspToken, StakeTable};
+use espresso_contract_deployer::{
+ build_signer, builder::DeployerArgsBuilder,
+ network_config::light_client_genesis_from_stake_table, Contract, Contracts,
+};
+use hotshot_contract_adapter::{
+ sol_types::{
+ EspToken::{self, EspTokenInstance},
+ StakeTable,
+ },
+ stake_table::StakeTableContractVersion,
+};
+use hotshot_state_prover::mock_ledger::STAKE_TABLE_CAPACITY_FOR_TEST;
use hotshot_types::light_client::StateKeyPair;
-use rand::{rngs::StdRng, CryptoRng, RngCore, SeedableRng as _};
+use rand::{rngs::StdRng, CryptoRng, Rng as _, RngCore, SeedableRng as _};
use url::Url;
use crate::{parse::Commission, registration::register_validator, BLSKeyPair, DEV_MNEMONIC};
@@ -28,8 +38,6 @@ type TestProvider = FillProvider<
Ethereum,
>;
-type SchnorrKeyPair = jf_signature::schnorr::KeyPair;
-
#[derive(Debug, Clone)]
pub struct TestSystem {
pub provider: TestProvider,
@@ -47,6 +55,12 @@ pub struct TestSystem {
impl TestSystem {
pub async fn deploy() -> Result {
+ Self::deploy_version(StakeTableContractVersion::V2).await
+ }
+
+ pub async fn deploy_version(
+ stake_table_contract_version: StakeTableContractVersion,
+ ) -> Result {
let exit_escrow_period = Duration::from_secs(1);
let port = portpicker::pick_unused_port().unwrap();
// Spawn anvil
@@ -64,45 +78,45 @@ impl TestSystem {
"Signer address mismatch"
);
- // `EspToken.sol`
- let token_impl = EspToken::deploy(provider.clone()).await?;
- let initial_supply = U256::from(3590000000u64);
- let token_name = "Espresso".to_string();
- let token_symbol = "ESP".to_string();
- let data = token_impl
- .initialize(
- deployer_address,
- deployer_address,
- initial_supply,
- token_name,
- token_symbol,
- )
- .calldata()
- .clone();
-
- let token_proxy =
- ERC1967Proxy::deploy(provider.clone(), *token_impl.address(), data).await?;
- let token = EspToken::new(*token_proxy.address(), provider.clone());
-
- // `StakeTable.sol`
- let stake_table_impl = StakeTable::deploy(provider.clone()).await?;
- let data = stake_table_impl
- .initialize(
- *token_proxy.address(),
- "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".parse()?, // fake LC address
- U256::from(exit_escrow_period.as_secs()),
- deployer_address,
- )
- .calldata()
- .clone();
-
- let st_proxy =
- ERC1967Proxy::deploy(provider.clone(), *stake_table_impl.address(), data).await?;
+ // Create a fake stake table to create a genesis state. This is fine because we don't
+ // currently use the light client contract. Will need to be updated once we implement
+ // slashing and call the light client contract from the stake table contract.
+ let blocks_per_epoch = 100;
+ let epoch_start_block = 1;
+ let (genesis_state, genesis_stake) = light_client_genesis_from_stake_table(
+ &Default::default(),
+ STAKE_TABLE_CAPACITY_FOR_TEST,
+ )
+ .unwrap();
+
+ let mut contracts = Contracts::new();
+ let args = DeployerArgsBuilder::default()
+ .deployer(provider.clone())
+ .mock_light_client(true)
+ .genesis_lc_state(genesis_state)
+ .genesis_st_state(genesis_stake)
+ .blocks_per_epoch(blocks_per_epoch)
+ .epoch_start_block(epoch_start_block)
+ .exit_escrow_period(U256::from(exit_escrow_period.as_secs()))
+ .build()
+ .unwrap();
+
+ match stake_table_contract_version {
+ StakeTableContractVersion::V1 => args.deploy_to_stake_table_v1(&mut contracts).await?,
+ StakeTableContractVersion::V2 => args.deploy_all(&mut contracts).await?,
+ };
+
+ let stake_table = contracts
+ .address(Contract::StakeTableProxy)
+ .expect("StakeTableProxy deployed");
+ let token = contracts
+ .address(Contract::EspTokenProxy)
+ .expect("EspTokenProxy deployed");
let approval_amount = parse_ether("1000000")?;
// Approve the stake table contract so it can transfer tokens to itself
- let receipt = token
- .approve(*st_proxy.address(), approval_amount)
+ let receipt = EspTokenInstance::new(token, &provider)
+ .approve(stake_table, approval_amount)
.send()
.await?
.get_receipt()
@@ -116,8 +130,8 @@ impl TestSystem {
provider,
signer,
deployer_address,
- token: *token_proxy.address(),
- stake_table: *st_proxy.address(),
+ token,
+ stake_table,
exit_escrow_period,
rpc_url,
bls_key_pair,
@@ -134,7 +148,7 @@ impl TestSystem {
(
PrivateKeySigner::random_with(rng),
BLSKeyPair::generate(rng),
- SchnorrKeyPair::generate(rng).into(),
+ StateKeyPair::generate_from_seed(rng.gen()),
)
}
@@ -145,7 +159,7 @@ impl TestSystem {
self.commission,
self.deployer_address,
self.bls_key_pair.clone(),
- self.state_key_pair.ver_key(),
+ self.state_key_pair.clone(),
)
.await?;
assert!(receipt.status());
diff --git a/staking-cli/src/lib.rs b/staking-cli/src/lib.rs
index bfd73fdff1..4aa3fe4d2b 100644
--- a/staking-cli/src/lib.rs
+++ b/staking-cli/src/lib.rs
@@ -11,10 +11,7 @@ use anyhow::{bail, Result};
use clap::{Parser, Subcommand};
use clap_serde_derive::ClapSerde;
use demo::DelegationConfig;
-pub(crate) use hotshot_types::{
- light_client::{StateSignKey, StateVerKey},
- signature_key::BLSPrivKey,
-};
+pub(crate) use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey};
pub(crate) use jf_signature::bls_over_bn254::KeyPair as BLSKeyPair;
use parse::Commission;
use sequencer_utils::logging;
diff --git a/staking-cli/src/main.rs b/staking-cli/src/main.rs
index a728790c85..8b544e03dd 100644
--- a/staking-cli/src/main.rs
+++ b/staking-cli/src/main.rs
@@ -14,6 +14,7 @@ use hotshot_contract_adapter::{
evm::DecodeRevert as _,
sol_types::EspToken::{self, EspTokenErrors},
};
+use hotshot_types::light_client::StateKeyPair;
use staking_cli::{
claim::{claim_validator_exit, claim_withdrawal},
delegation::{approve, delegate, undelegate},
@@ -252,7 +253,7 @@ pub async fn main() -> Result<()> {
commission,
account,
(consensus_private_key).into(),
- (&state_private_key).into(),
+ StateKeyPair::from_sign_key(state_private_key),
)
.await
},
@@ -266,7 +267,7 @@ pub async fn main() -> Result<()> {
stake_table_addr,
account,
(consensus_private_key).into(),
- (&state_private_key).into(),
+ StateKeyPair::from_sign_key(state_private_key),
)
.await
},
diff --git a/staking-cli/src/registration.rs b/staking-cli/src/registration.rs
index 360b7693c8..042df255cf 100644
--- a/staking-cli/src/registration.rs
+++ b/staking-cli/src/registration.rs
@@ -1,31 +1,40 @@
use alloy::{
- primitives::Address, providers::Provider, rpc::types::TransactionReceipt,
- sol_types::SolValue as _,
+ primitives::{Address, Bytes},
+ providers::Provider,
+ rpc::types::TransactionReceipt,
};
use anyhow::Result;
-use ark_ec::CurveGroup;
use hotshot_contract_adapter::{
evm::DecodeRevert as _,
sol_types::{
EdOnBN254PointSol, G1PointSol, G2PointSol,
- StakeTable::{self, StakeTableErrors},
+ StakeTableV2::{self, StakeTableV2Errors},
},
+ stake_table::{sign_address_bls, sign_address_schnorr, StakeTableContractVersion},
};
-use jf_signature::constants::CS_ID_BLS_BN254;
+use hotshot_types::{light_client::StateKeyPair, signature_key::BLSKeyPair};
-use crate::{parse::Commission, BLSKeyPair, StateVerKey};
+use crate::parse::Commission;
+/// The ver_key and signature as types that contract bindings expect
fn prepare_bls_payload(
bls_key_pair: &BLSKeyPair,
validator_address: Address,
) -> (G2PointSol, G1PointSol) {
- let bls_vk_sol: G2PointSol = bls_key_pair.ver_key().to_affine().into();
- let sig_sol: G1PointSol = bls_key_pair
- .sign(&validator_address.abi_encode(), CS_ID_BLS_BN254)
- .sigma
- .into_affine()
- .into();
- (bls_vk_sol, sig_sol)
+ (
+ bls_key_pair.ver_key().to_affine().into(),
+ sign_address_bls(bls_key_pair, validator_address),
+ )
+}
+
+// The ver_key and signature as types that contract bindings expect
+fn prepare_schnorr_payload(
+ schnorr_key_pair: &StateKeyPair,
+ validator_address: Address,
+) -> (EdOnBN254PointSol, Bytes) {
+ let schnorr_vk_sol: EdOnBN254PointSol = schnorr_key_pair.ver_key().to_affine().into();
+ let sig = sign_address_schnorr(schnorr_key_pair, validator_address);
+ (schnorr_vk_sol, sig)
}
pub async fn register_validator(
@@ -34,23 +43,45 @@ pub async fn register_validator(
commission: Commission,
validator_address: Address,
bls_key_pair: BLSKeyPair,
- schnorr_vk: StateVerKey,
+ schnorr_key_pair: StateKeyPair,
) -> Result {
- let stake_table = StakeTable::new(stake_table_addr, &provider);
- let (bls_vk_sol, sig_sol) = prepare_bls_payload(&bls_key_pair, validator_address);
- let schnorr_vk_sol: EdOnBN254PointSol = schnorr_vk.to_affine().into();
- Ok(stake_table
- .registerValidator(
- bls_vk_sol,
- schnorr_vk_sol,
- sig_sol.into(),
- commission.to_evm(),
- )
- .send()
- .await
- .maybe_decode_revert::()?
- .get_receipt()
- .await?)
+ // NOTE: the StakeTableV2 ABI is a superset of the V1 ABI because the V2 inherits from V1 so we
+ // can always use the V2 bindings for calling functions and decoding events, even if we are
+ // connected to the V1 contract.
+ let stake_table = StakeTableV2::new(stake_table_addr, &provider);
+ let (bls_vk, bls_sig) = prepare_bls_payload(&bls_key_pair, validator_address);
+ let (schnorr_vk, schnorr_sig) = prepare_schnorr_payload(&schnorr_key_pair, validator_address);
+
+ let version = stake_table.getVersion().call().await?.try_into()?;
+ // There is a race-condition here if the contract is upgraded while this transactions is waiting
+ // to be mined. We're very unlikely to hit this in practice, and since we only perform the
+ // upgrade on decaf this is acceptable.
+ Ok(match version {
+ StakeTableContractVersion::V1 => {
+ stake_table
+ .registerValidator(bls_vk, schnorr_vk, bls_sig.into(), commission.to_evm())
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?
+ },
+ StakeTableContractVersion::V2 => {
+ stake_table
+ .registerValidatorV2(
+ bls_vk,
+ schnorr_vk,
+ bls_sig.into(),
+ schnorr_sig,
+ commission.to_evm(),
+ )
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?
+ },
+ })
}
pub async fn update_consensus_keys(
@@ -58,36 +89,63 @@ pub async fn update_consensus_keys(
stake_table_addr: Address,
validator_address: Address,
bls_key_pair: BLSKeyPair,
- schnorr_vk: StateVerKey,
+ schnorr_key_pair: StateKeyPair,
) -> Result {
- let stake_table = StakeTable::new(stake_table_addr, &provider);
- let (bls_vk_sol, sig_sol) = prepare_bls_payload(&bls_key_pair, validator_address);
- let schnorr_vk_sol: EdOnBN254PointSol = schnorr_vk.to_affine().into();
- Ok(stake_table
- .updateConsensusKeys(bls_vk_sol, schnorr_vk_sol, sig_sol.into())
- .send()
- .await
- .maybe_decode_revert::()?
- .get_receipt()
- .await?)
+ // NOTE: the StakeTableV2 ABI is a superset of the V1 ABI because the V2 inherits from V1 so we
+ // can always use the V2 bindings for calling functions and decoding events, even if we are
+ // connected to the V1 contract.
+ let stake_table = StakeTableV2::new(stake_table_addr, &provider);
+ let (bls_vk, bls_sig) = prepare_bls_payload(&bls_key_pair, validator_address);
+ let (schnorr_vk, schnorr_sig) = prepare_schnorr_payload(&schnorr_key_pair, validator_address);
+
+ // There is a race-condition here if the contract is upgraded while this transactions is waiting
+ // to be mined. We're very unlikely to hit this in practice, and since we only perform the
+ // upgrade on decaf this is acceptable.
+ let version = stake_table.getVersion().call().await?.try_into()?;
+ Ok(match version {
+ StakeTableContractVersion::V1 => {
+ stake_table
+ .updateConsensusKeys(bls_vk, schnorr_vk, bls_sig.into())
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?
+ },
+ StakeTableContractVersion::V2 => {
+ stake_table
+ .updateConsensusKeysV2(bls_vk, schnorr_vk, bls_sig.into(), schnorr_sig)
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?
+ },
+ })
}
pub async fn deregister_validator(
provider: impl Provider,
stake_table_addr: Address,
) -> Result {
- let stake_table = StakeTable::new(stake_table_addr, &provider);
+ let stake_table = StakeTableV2::new(stake_table_addr, &provider);
Ok(stake_table
.deregisterValidator()
.send()
.await
- .maybe_decode_revert::()?
+ .maybe_decode_revert::()?
.get_receipt()
.await?)
}
#[cfg(test)]
mod test {
+ use alloy::providers::WalletProvider as _;
+ use espresso_contract_deployer::build_provider;
+ use espresso_types::{
+ v0_3::{StakeTableEvent, StakeTableFetcher},
+ L1Client,
+ };
use rand::{rngs::StdRng, SeedableRng as _};
use super::*;
@@ -106,21 +164,21 @@ mod test {
system.commission,
validator_address,
system.bls_key_pair,
- system.state_key_pair.ver_key(),
+ system.state_key_pair,
)
.await?;
assert!(receipt.status());
let event = receipt
- .decoded_log::()
+ .decoded_log::()
.unwrap();
assert_eq!(event.account, validator_address);
assert_eq!(event.commission, system.commission.to_evm());
- assert_eq!(event.blsVk, bls_vk_sol);
- assert_eq!(event.schnorrVk, schnorr_vk_sol);
+ assert_eq!(event.blsVK, bls_vk_sol);
+ assert_eq!(event.schnorrVK, schnorr_vk_sol);
- // TODO verify we can parse keys and verify signature
+ event.data.authenticate()?;
Ok(())
}
@@ -132,7 +190,9 @@ mod test {
let receipt = deregister_validator(&system.provider, system.stake_table).await?;
assert!(receipt.status());
- let event = receipt.decoded_log::().unwrap();
+ let event = receipt
+ .decoded_log::()
+ .unwrap();
assert_eq!(event.validator, system.deployer_address);
Ok(())
@@ -146,26 +206,158 @@ mod test {
let mut rng = StdRng::from_seed([43u8; 32]);
let (_, new_bls, new_schnorr) = TestSystem::gen_keys(&mut rng);
let (bls_vk_sol, _) = prepare_bls_payload(&new_bls, validator_address);
- let schnorr_vk_sol: EdOnBN254PointSol = new_schnorr.ver_key().to_affine().into();
+ let (schnorr_vk_sol, _) = prepare_schnorr_payload(&new_schnorr, validator_address);
let receipt = update_consensus_keys(
&system.provider,
system.stake_table,
validator_address,
new_bls,
- new_schnorr.ver_key(),
+ new_schnorr,
)
.await?;
assert!(receipt.status());
let event = receipt
- .decoded_log::()
+ .decoded_log::()
.unwrap();
assert_eq!(event.account, system.deployer_address);
assert_eq!(event.blsVK, bls_vk_sol);
assert_eq!(event.schnorrVK, schnorr_vk_sol);
+ event.data.authenticate()?;
+
+ Ok(())
+ }
+
+ /// The GCL must remove stake table events with incorrect signatures. This test verifies that a
+ /// validator registered event with incorrect schnorr signature is removed before the stake
+ /// table is computed.
+ #[tokio::test]
+ async fn test_integration_unauthenticated_validator_registered_events_removed() -> Result<()> {
+ let system = TestSystem::deploy().await?;
+
+ // register a validator with correct signature
+ system.register_validator().await?;
+
+ // NOTE: we can't register a validator with a bad BLS signature because the contract will revert
+
+ let provider = build_provider(
+ "test test test test test test test test test test test junk".to_string(),
+ 1,
+ system.rpc_url.clone(),
+ );
+ let validator_address = provider.default_signer_address();
+ let (_, bls_key_pair, schnorr_key_pair) =
+ TestSystem::gen_keys(&mut StdRng::from_seed([1u8; 32]));
+ let (_, _, other_schnorr_key_pair) =
+ TestSystem::gen_keys(&mut StdRng::from_seed([2u8; 32]));
+
+ let (bls_vk, bls_sig) = prepare_bls_payload(&bls_key_pair, validator_address);
+ let (schnorr_vk, _) = prepare_schnorr_payload(&schnorr_key_pair, validator_address);
+
+ // create a valid schnorr signature with the *wrong* key
+ let (_, schnorr_sig_other_key) =
+ prepare_schnorr_payload(&other_schnorr_key_pair, validator_address);
+
+ let stake_table = StakeTableV2::new(system.stake_table, provider);
+
+ // register a validator with the schnorr sig from another key
+ let receipt = stake_table
+ .registerValidatorV2(
+ bls_vk,
+ schnorr_vk,
+ bls_sig.into(),
+ schnorr_sig_other_key.clone(),
+ Commission::try_from("12.34")?.to_evm(),
+ )
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?;
+ assert!(receipt.status());
+
+ let l1 = L1Client::new(vec![system.rpc_url])?;
+ let events = StakeTableFetcher::fetch_events_from_contract(
+ l1,
+ system.stake_table,
+ Some(0),
+ receipt.block_number.unwrap(),
+ )
+ .await?
+ .sort_events()?;
+
+ // verify that we only have the first RegisterV2 event
+ assert_eq!(events.len(), 1);
+ match events[0].1.clone() {
+ StakeTableEvent::RegisterV2(event) => {
+ assert_eq!(event.account, system.deployer_address);
+ },
+ _ => panic!("expected RegisterV2 event"),
+ }
+ Ok(())
+ }
+
+ /// The GCL must remove stake table events with incorrect signatures. This test verifies that a
+ /// consensus keys update event with incorrect schnorr signature is removed before the stake
+ /// table is computed.
+ #[tokio::test]
+ async fn test_integration_unauthenticated_update_consensus_keys_events_removed() -> Result<()> {
+ let system = TestSystem::deploy().await?;
+
+ // register a validator with correct signature
+ system.register_validator().await?;
+ let validator_address = system.deployer_address;
+
+ // NOTE: we can't register a validator with a bad BLS signature because the contract will revert
+
+ let (_, new_bls_key_pair, new_schnorr_key_pair) =
+ TestSystem::gen_keys(&mut StdRng::from_seed([1u8; 32]));
+ let (_, _, other_schnorr_key_pair) =
+ TestSystem::gen_keys(&mut StdRng::from_seed([2u8; 32]));
+
+ let (bls_vk, bls_sig) = prepare_bls_payload(&new_bls_key_pair, validator_address);
+ let (schnorr_vk, _) = prepare_schnorr_payload(&new_schnorr_key_pair, validator_address);
+
+ // create a valid schnorr signature with the *wrong* key
+ let (_, schnorr_sig_other_key) =
+ prepare_schnorr_payload(&other_schnorr_key_pair, validator_address);
+
+ let stake_table = StakeTableV2::new(system.stake_table, system.provider);
+
+ // update consensus keys with the schnorr sig from another key
+ let receipt = stake_table
+ .updateConsensusKeysV2(bls_vk, schnorr_vk, bls_sig.into(), schnorr_sig_other_key)
+ .send()
+ .await
+ .maybe_decode_revert::()?
+ .get_receipt()
+ .await?;
+ assert!(receipt.status());
+
+ let l1 = L1Client::new(vec![system.rpc_url])?;
+ let events = StakeTableFetcher::fetch_events_from_contract(
+ l1,
+ system.stake_table,
+ Some(0),
+ receipt.block_number.unwrap(),
+ )
+ .await?
+ .sort_events()?;
+
+ // verify that we only have the RegisterV2 event
+ assert_eq!(events.len(), 1);
+ match events[0].1.clone() {
+ StakeTableEvent::RegisterV2(event) => {
+ assert_eq!(event.account, system.deployer_address);
+ },
+ _ => panic!("expected RegisterV2 event"),
+ }
+
+ println!("Events: {events:?}");
+
Ok(())
}
}
diff --git a/staking-cli/tests/cli.rs b/staking-cli/tests/cli.rs
index 8bc7eb5432..bd87dbc892 100644
--- a/staking-cli/tests/cli.rs
+++ b/staking-cli/tests/cli.rs
@@ -8,12 +8,20 @@ use alloy::primitives::{
Address, U256,
};
use anyhow::Result;
+use hotshot_contract_adapter::stake_table::StakeTableContractVersion;
use rand::{rngs::StdRng, SeedableRng as _};
use sequencer_utils::test_utils::setup_test;
-use staking_cli::{demo::DelegationConfig, deploy::Signer, *};
+use staking_cli::{demo::DelegationConfig, deploy, deploy::Signer, Config};
use crate::deploy::TestSystem;
+#[rstest_reuse::template]
+#[rstest::rstest]
+#[case::v1(StakeTableContractVersion::V1)]
+#[case::v2(StakeTableContractVersion::V2)]
+#[tokio::test]
+async fn stake_table_versions(#[case] _version: StakeTableContractVersion) {}
+
const TEST_MNEMONIC: &str = "wool upset allow cheap purity craft hat cute below useful reject door";
trait AssertSuccess {
@@ -160,10 +168,10 @@ fn test_cli_create_file_ledger() -> anyhow::Result<()> {
}
// TODO: ideally we would test that the decoding works for all the commands
-#[tokio::test]
-async fn test_cli_contract_revert() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_contract_revert(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let mut cmd = base_cmd();
system.args(&mut cmd, Signer::Mnemonic);
@@ -180,10 +188,10 @@ async fn test_cli_contract_revert() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_register_validator() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_register_validator(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let mut cmd = base_cmd();
system.args(&mut cmd, Signer::Mnemonic);
cmd.arg("register-validator")
@@ -210,9 +218,9 @@ async fn test_cli_register_validator() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_update_consensus_keys() -> Result<()> {
- let system = TestSystem::deploy().await?;
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_update_consensus_keys(#[case] version: StakeTableContractVersion) -> Result<()> {
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let mut rng = StdRng::from_seed([43u8; 32]);
@@ -230,10 +238,10 @@ async fn test_cli_update_consensus_keys() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_delegate() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_delegate(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let mut cmd = base_cmd();
@@ -248,10 +256,10 @@ async fn test_cli_delegate() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_deregister_validator() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_deregister_validator(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let mut cmd = base_cmd();
@@ -260,10 +268,10 @@ async fn test_cli_deregister_validator() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_undelegate() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_undelegate(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let amount = "123";
system.delegate(parse_ether(amount)?).await?;
@@ -280,10 +288,10 @@ async fn test_cli_undelegate() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_claim_withdrawal() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_claim_withdrawal(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let amount = U256::from(123);
system.register_validator().await?;
system.delegate(amount).await?;
@@ -300,10 +308,10 @@ async fn test_cli_claim_withdrawal() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_claim_validator_exit() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_claim_validator_exit(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let amount = U256::from(123);
system.register_validator().await?;
system.delegate(amount).await?;
@@ -320,10 +328,12 @@ async fn test_cli_claim_validator_exit() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_stake_for_demo_default_num_validators() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_stake_for_demo_default_num_validators(
+ #[case] version: StakeTableContractVersion,
+) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let mut cmd = base_cmd();
system.args(&mut cmd, Signer::Mnemonic);
@@ -331,10 +341,12 @@ async fn test_cli_stake_for_demo_default_num_validators() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_stake_for_demo_three_validators() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_stake_for_demo_three_validators(
+ #[case] version: StakeTableContractVersion,
+) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let mut cmd = base_cmd();
system.args(&mut cmd, Signer::Mnemonic);
@@ -346,9 +358,20 @@ async fn test_cli_stake_for_demo_three_validators() -> Result<()> {
Ok(())
}
-async fn stake_for_demo_delegation_config_helper(config: DelegationConfig) -> Result<()> {
+#[rstest::rstest]
+#[tokio::test]
+async fn stake_for_demo_delegation_config_helper(
+ #[values(StakeTableContractVersion::V1, StakeTableContractVersion::V2)]
+ version: StakeTableContractVersion,
+ #[values(
+ DelegationConfig::EqualAmounts,
+ DelegationConfig::VariableAmounts,
+ DelegationConfig::MultipleDelegators
+ )]
+ config: DelegationConfig,
+) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let mut cmd = base_cmd();
system.args(&mut cmd, Signer::Mnemonic);
@@ -360,25 +383,10 @@ async fn stake_for_demo_delegation_config_helper(config: DelegationConfig) -> Re
Ok(())
}
-#[tokio::test]
-async fn test_cli_stake_for_demo_delegation_config_equal_amounts() -> Result<()> {
- stake_for_demo_delegation_config_helper(DelegationConfig::EqualAmounts).await
-}
-
-#[tokio::test]
-async fn test_cli_stake_for_demo_delegation_config_variable_amounts() -> Result<()> {
- stake_for_demo_delegation_config_helper(DelegationConfig::VariableAmounts).await
-}
-
-#[tokio::test]
-async fn test_cli_stake_for_demo_delegation_config_multiple_delegators() -> Result<()> {
- stake_for_demo_delegation_config_helper(DelegationConfig::MultipleDelegators).await
-}
-
-#[tokio::test]
-async fn test_cli_approve() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_approve(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let amount = "123";
let mut cmd = base_cmd();
@@ -394,10 +402,10 @@ async fn test_cli_approve() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_balance() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_balance(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
// Check balance of account owner
let mut cmd = base_cmd();
@@ -427,10 +435,10 @@ async fn test_cli_balance() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_allowance() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_allowance(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
// Check allowance of account owner
let mut cmd = base_cmd();
@@ -458,10 +466,10 @@ async fn test_cli_allowance() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_transfer() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_transfer(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
let addr = "0x1111111111111111111111111111111111111111".parse::
()?;
let amount = parse_ether("0.123")?;
let mut cmd = base_cmd();
@@ -479,10 +487,10 @@ async fn test_cli_transfer() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_stake_table_full() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_stake_table_full(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let amount = parse_ether("0.123")?;
@@ -502,10 +510,10 @@ async fn test_cli_stake_table_full() -> Result<()> {
Ok(())
}
-#[tokio::test]
-async fn test_cli_stake_table_compact() -> Result<()> {
+#[rstest_reuse::apply(stake_table_versions)]
+async fn test_cli_stake_table_compact(#[case] version: StakeTableContractVersion) -> Result<()> {
setup_test();
- let system = TestSystem::deploy().await?;
+ let system = TestSystem::deploy_version(version).await?;
system.register_validator().await?;
let amount = parse_ether("0.123")?;
diff --git a/types/Cargo.toml b/types/Cargo.toml
index 4d36c95eba..b03658612a 100644
--- a/types/Cargo.toml
+++ b/types/Cargo.toml
@@ -65,6 +65,8 @@ vid = { workspace = true }
espresso-contract-deployer = { path = "../contracts/rust/deployer" }
espresso-types = { path = ".", features = [ "testing" ] }
portpicker = { workspace = true }
+rstest = { workspace = true }
+rstest_reuse = { workspace = true }
[package.metadata.cargo-machete]
ignored = ["base64_bytes", "hotshot_testing"]
diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs
index 728c3b59a9..d39c883bce 100644
--- a/types/src/v0/impls/stake_table.rs
+++ b/types/src/v0/impls/stake_table.rs
@@ -14,8 +14,9 @@ use async_lock::{Mutex, RwLock};
use committable::Committable;
use futures::stream::{self, StreamExt};
use hotshot::types::{BLSPubKey, SchnorrPubKey, SignatureKey as _};
-use hotshot_contract_adapter::sol_types::StakeTable::{
- self, ConsensusKeysUpdated, Delegated, Undelegated, ValidatorExit, ValidatorRegistered,
+use hotshot_contract_adapter::sol_types::StakeTableV2::{
+ self, ConsensusKeysUpdated, ConsensusKeysUpdatedV2, Delegated, Undelegated, ValidatorExit,
+ ValidatorRegistered, ValidatorRegisteredV2,
};
use hotshot_types::{
data::{vid_disperse::VID_TARGET_TOTAL_STAKE, EpochNumber},
@@ -49,24 +50,96 @@ use crate::traits::EventsPersistenceRead;
type Epoch = ::Epoch;
+/// Format the alloy Log RPC type in a way to make it easy to find the event in an explorer.
+trait DisplayLog {
+ fn display(&self) -> String;
+}
+
+impl DisplayLog for Log {
+ fn display(&self) -> String {
+ // These values are all unlikely to be missing because we only create Log variables by
+ // fetching them from the RPC, so for simplicity we use defaults if any of the values
+ // are missing.
+ let block = self.block_number.unwrap_or_default();
+ let index = self.log_index.unwrap_or_default();
+ let hash = self.transaction_hash.unwrap_or_default();
+ format!("Log(block={block},index={index},transaction_hash={hash})")
+ }
+}
+
#[derive(Clone, PartialEq)]
pub struct StakeTableEvents {
registrations: Vec<(ValidatorRegistered, Log)>,
+ registrations_v2: Vec<(ValidatorRegisteredV2, Log)>,
deregistrations: Vec<(ValidatorExit, Log)>,
delegated: Vec<(Delegated, Log)>,
undelegated: Vec<(Undelegated, Log)>,
keys: Vec<(ConsensusKeysUpdated, Log)>,
+ keys_v2: Vec<(ConsensusKeysUpdatedV2, Log)>,
}
impl StakeTableEvents {
+ /// Creates a new instance of `StakeTableEvents` with the provided events.
+ ///
+ /// Remove unauthenticated registration and key update events
+ fn from_l1_logs(
+ registrations: Vec<(ValidatorRegistered, Log)>,
+ registrations_v2: Vec<(ValidatorRegisteredV2, Log)>,
+ deregistrations: Vec<(ValidatorExit, Log)>,
+ delegated: Vec<(Delegated, Log)>,
+ undelegated: Vec<(Undelegated, Log)>,
+ keys: Vec<(ConsensusKeysUpdated, Log)>,
+ keys_v2: Vec<(ConsensusKeysUpdatedV2, Log)>,
+ ) -> Self {
+ let registrations_v2 = registrations_v2
+ .into_iter()
+ .filter(|(event, log)| {
+ event
+ .authenticate()
+ .map_err(|_| {
+ tracing::warn!(
+ "Failed to authenticate ValidatorRegisteredV2 event {}",
+ log.display()
+ );
+ })
+ .is_ok()
+ })
+ .collect();
+ let keys_v2 = keys_v2
+ .into_iter()
+ .filter(|(event, log)| {
+ event
+ .authenticate()
+ .map_err(|_| {
+ tracing::warn!(
+ "Failed to authenticate ConsensusKeysUpdatedV2 event {}",
+ log.display()
+ );
+ })
+ .is_ok()
+ })
+ .collect();
+ Self {
+ registrations,
+ registrations_v2,
+ deregistrations,
+ delegated,
+ undelegated,
+ keys,
+ keys_v2,
+ }
+ }
+
pub fn sort_events(self) -> anyhow::Result> {
let mut events: Vec<(EventKey, StakeTableEvent)> = Vec::new();
let Self {
registrations,
+ registrations_v2,
deregistrations,
delegated,
undelegated,
keys,
+ keys_v2,
} = self;
for (registration, log) in registrations {
@@ -78,6 +151,15 @@ impl StakeTableEvents {
registration.into(),
));
}
+ for (registration, log) in registrations_v2 {
+ events.push((
+ (
+ log.block_number.context("block number")?,
+ log.log_index.context("log index")?,
+ ),
+ registration.into(),
+ ));
+ }
for (dereg, log) in deregistrations {
events.push((
(
@@ -115,6 +197,15 @@ impl StakeTableEvents {
update.into(),
));
}
+ for (update, log) in keys_v2 {
+ events.push((
+ (
+ log.block_number.context("block number")?,
+ log.log_index.context("log index")?,
+ ),
+ update.into(),
+ ));
+ }
events.sort_by_key(|(key, _)| *key);
Ok(events)
@@ -122,6 +213,9 @@ impl StakeTableEvents {
}
/// Extract all validators from L1 stake table events.
+// TODO: MA we should reject ValidatorRegistered and ConsensusKeysUpdated events after the stake
+// table contract has been updated to V2, this is currently however not a safety issue because the
+// V2 contract will not generate the V1 events after the upgrade to V2.
pub fn validators_from_l1_events>(
events: I,
) -> anyhow::Result>> {
@@ -137,19 +231,60 @@ pub fn validators_from_l1_events>(
schnorrVk,
commission,
}) => {
- // TODO(abdul): BLS and Schnorr signature keys verification
- let stake_table_key: BLSPubKey = blsVk.clone().into();
- let state_ver_key: SchnorrPubKey = schnorrVk.clone().into();
- // TODO(MA): The stake table contract currently enforces that each bls key is only used once. We will
- // move this check to the confirmation layer and remove it from the contract. Once we have the signature
- // check in this functions we can skip if a BLS key, or Schnorr key was previously used.
+ let stake_table_key: BLSPubKey = blsVk.into();
+ let state_ver_key: SchnorrPubKey = schnorrVk.into();
+ // The stake table contract enforces that each bls key is only used once.
+ if bls_keys.contains(&stake_table_key) {
+ bail!("bls key already used: {}", stake_table_key.to_string());
+ };
+
+ // The contract does *not* enforce that each schnorr key is only used once,
+ // therefore it's possible to have multiple validators with the same schnorr key.
+ if schnorr_keys.contains(&state_ver_key) {
+ tracing::warn!("schnorr key already used: {}", state_ver_key.to_string());
+ };
+
+ bls_keys.insert(stake_table_key);
+ schnorr_keys.insert(state_ver_key.clone());
+
+ match validators.entry(account) {
+ indexmap::map::Entry::Occupied(_occupied_entry) => {
+ bail!("validator {:#x} already registered", *account)
+ },
+ indexmap::map::Entry::Vacant(vacant_entry) => vacant_entry.insert(Validator {
+ account,
+ stake_table_key,
+ state_ver_key,
+ stake: U256::from(0_u64),
+ commission,
+ delegators: HashMap::default(),
+ }),
+ };
+ },
+ StakeTableEvent::RegisterV2(event) => {
+ // Signature authentication is performed right after fetching, if we get an
+ // unauthenticated event here, something went wrong, we abort early.
+ event
+ .authenticate()
+ .with_context(|| format!("Failed to authenticate event: {event:?}"))?;
+ let ValidatorRegisteredV2 {
+ account,
+ blsVK,
+ schnorrVK,
+ commission,
+ ..
+ } = event;
+
+ let stake_table_key: BLSPubKey = blsVK.into();
+ let state_ver_key: SchnorrPubKey = schnorrVK.into();
+ // The stake table contract enforces that each bls key is only used once.
if bls_keys.contains(&stake_table_key) {
- bail!("bls key {} already used", stake_table_key.to_string());
+ bail!("bls key already used: {}", stake_table_key.to_string());
};
// The contract does *not* enforce that each schnorr key is only used once.
if schnorr_keys.contains(&state_ver_key) {
- tracing::warn!("schnorr key {} already used", state_ver_key.to_string());
+ tracing::warn!("schnorr key already used: {}", state_ver_key.to_string());
};
bls_keys.insert(stake_table_key);
@@ -232,6 +367,56 @@ pub fn validators_from_l1_events>(
blsVK,
schnorrVK,
} = update;
+ let validator = validators
+ .get_mut(&account)
+ .with_context(|| format!("validator {account:#x} not found"))?;
+ let stake_table_key: BLSPubKey = blsVK.into();
+ let state_ver_key: SchnorrPubKey = schnorrVK.into();
+ // The stake table contract enforces that each bls key is only used once.
+ if bls_keys.contains(&stake_table_key) {
+ bail!("bls key already used: {}", stake_table_key.to_string());
+ };
+
+ // The contract does *not* enforce that each schnorr key is only used once,
+ // therefore it's possible to have multiple validators with the same schnorr key.
+ if schnorr_keys.contains(&state_ver_key) {
+ tracing::warn!("schnorr key already used: {}", state_ver_key.to_string());
+ };
+
+ // Reuse the keys converted above for the duplicate checks;
+ // no need to convert the solidity types a second time.
+
+ validator.stake_table_key = stake_table_key;
+ validator.state_ver_key = state_ver_key;
+ },
+ StakeTableEvent::KeyUpdateV2(update) => {
+ // Signature authentication is performed right after fetching, if we get an
+ // unauthenticated event here, something went wrong, we abort early.
+ update
+ .authenticate()
+ .with_context(|| format!("Failed to authenticate event: {update:?}"))?;
+
+ let ConsensusKeysUpdatedV2 {
+ account,
+ blsVK,
+ schnorrVK,
+ ..
+ } = update;
+
+ // Convert the solidity key types to our native key types.
+ let stake_table_key: BLSPubKey = blsVK.into();
+ let state_ver_key: SchnorrPubKey = schnorrVK.into();
+ // The stake table contract enforces that each bls key is only used once.
+ if bls_keys.contains(&stake_table_key) {
+ bail!("bls key already used: {}", stake_table_key.to_string());
+ };
+
+ // The contract does *not* enforce that each schnorr key is only used once,
+ // therefore it's possible to have multiple validators with the same schnorr key.
+ if schnorr_keys.contains(&state_ver_key) {
+ tracing::warn!("schnorr key already used: {}", state_ver_key.to_string());
+ };
+
let validator = validators
.get_mut(&account)
.with_context(|| "validator {account:#x} not found")?;
@@ -318,10 +503,12 @@ impl std::fmt::Debug for StakeTableEvent {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
StakeTableEvent::Register(event) => write!(f, "Register({:?})", event.account),
+ StakeTableEvent::RegisterV2(event) => write!(f, "RegisterV2({:?})", event.account),
StakeTableEvent::Deregister(event) => write!(f, "Deregister({:?})", event.validator),
StakeTableEvent::Delegate(event) => write!(f, "Delegate({:?})", event.delegator),
StakeTableEvent::Undelegate(event) => write!(f, "Undelegate({:?})", event.delegator),
StakeTableEvent::KeyUpdate(event) => write!(f, "KeyUpdate({:?})", event.account),
+ StakeTableEvent::KeyUpdateV2(event) => write!(f, "KeyUpdateV2({:?})", event.account),
}
}
}
@@ -506,7 +693,7 @@ impl StakeTableFetcher {
from_block: Option,
to_block: u64,
) -> anyhow::Result {
- let stake_table_contract = StakeTable::new(contract, l1_client.provider.clone());
+ let stake_table_contract = StakeTableV2::new(contract, l1_client.provider.clone());
// get the block number when the contract was initialized
// to avoid fetching events from block number 0
@@ -571,6 +758,40 @@ impl StakeTableFetcher {
}
});
+ // fetch registered events v2
+ // retry if the call to the provider to fetch the events fails
+ let registered_events_v2 = stream::iter(chunks.clone()).then(|(from, to)| {
+ let retry_delay = l1_client.options().l1_retry_delay;
+ let stake_table_contract = stake_table_contract.clone();
+ async move {
+ tracing::debug!(from, to, "fetch ValidatorRegisteredV2 events in range");
+ loop {
+ match stake_table_contract
+ .clone()
+ .ValidatorRegisteredV2_filter()
+ .from_block(from)
+ .to_block(to)
+ .query()
+ .await
+ {
+ Ok(events) => {
+ break stream::iter(events.into_iter().filter(|(event, log)| {
+ if let Err(e) = event.authenticate() {
+ tracing::warn!(%e, "Failed to authenticate ValidatorRegisteredV2 event: {}", log.display());
+ return false;
+ }
+ true
+ }));
+ },
+ Err(err) => {
+ tracing::warn!(from, to, %err, "ValidatorRegisteredV2 Error");
+ sleep(retry_delay).await;
+ },
+ }
+ }
+ }
+ });
+
// fetch validator de registration events
let deregistered_events = stream::iter(chunks.clone()).then(|(from, to)| {
let retry_delay = l1_client.options().l1_retry_delay;
@@ -643,7 +864,7 @@ impl StakeTableFetcher {
});
// fetch consensus keys updated events
- let keys_update_events = stream::iter(chunks).then(|(from, to)| {
+ let keys_update_events = stream::iter(chunks.clone()).then(|(from, to)| {
let retry_delay = l1_client.options().l1_retry_delay;
let stake_table_contract = stake_table_contract.clone();
async move {
@@ -666,19 +887,55 @@ impl StakeTableFetcher {
}
});
+ // fetch consensus keys updated v2 events
+ let keys_update_events_v2 = stream::iter(chunks).then(|(from, to)| {
+ let retry_delay = l1_client.options().l1_retry_delay;
+ let stake_table_contract = stake_table_contract.clone();
+ async move {
+ tracing::debug!(from, to, "fetch ConsensusKeysUpdatedV2 events in range");
+ loop {
+ match stake_table_contract
+ .ConsensusKeysUpdatedV2_filter()
+ .from_block(from)
+ .to_block(to)
+ .query()
+ .await
+ {
+ Ok(events) => {
+ break stream::iter(events.into_iter().filter(|(event, log)| {
+ if let Err(e) = event.authenticate() {
+ tracing::warn!(%e, "Failed to authenticate ConsensusKeysUpdatedV2 event {}", log.display());
+ return false;
+ }
+ true
+ }));
+ },
+ Err(err) => {
+ tracing::warn!(from, to, %err, "ConsensusKeysUpdatedV2 Error");
+ sleep(retry_delay).await;
+ },
+ }
+ }
+ }
+ });
+
let registrations = registered_events.flatten().collect().await;
+ let registrations_v2 = registered_events_v2.flatten().collect().await;
let deregistrations = deregistered_events.flatten().collect().await;
let delegated = delegated_events.flatten().collect().await;
let undelegated = undelegated_events.flatten().collect().await;
let keys = keys_update_events.flatten().collect().await;
+ let keys_v2 = keys_update_events_v2.flatten().collect().await;
- Ok(StakeTableEvents {
+ Ok(StakeTableEvents::from_l1_logs(
registrations,
+ registrations_v2,
deregistrations,
delegated,
undelegated,
keys,
- })
+ keys_v2,
+ ))
}
/// Get `StakeTable` at specific l1 block height.
@@ -1422,38 +1679,108 @@ impl DAMembers {
#[cfg(any(test, feature = "testing"))]
pub mod testing {
- use hotshot_contract_adapter::sol_types::{EdOnBN254PointSol, G2PointSol};
- use hotshot_types::light_client::StateKeyPair;
+ use alloy::primitives::Bytes;
+ use hotshot_contract_adapter::{
+ sol_types::{EdOnBN254PointSol, G1PointSol, G2PointSol},
+ stake_table::{sign_address_bls, sign_address_schnorr},
+ };
+ use hotshot_types::{light_client::StateKeyPair, signature_key::BLSKeyPair};
use rand::{Rng as _, RngCore as _};
use super::*;
// TODO: current tests are just sanity checks, we need more.
+ #[derive(Debug, Clone)]
pub struct TestValidator {
pub account: Address,
pub bls_vk: G2PointSol,
pub schnorr_vk: EdOnBN254PointSol,
pub commission: u16,
+ pub bls_sig: G1PointSol,
+ pub schnorr_sig: Bytes,
}
impl TestValidator {
pub fn random() -> Self {
- let rng = &mut rand::thread_rng();
+ let account = Address::random();
+ let commission = rand::thread_rng().gen_range(0..10000);
+ Self::random_update_keys(account, commission)
+ }
+
+ pub fn randomize_keys(&self) -> Self {
+ Self::random_update_keys(self.account, self.commission)
+ }
+
+ fn random_update_keys(account: Address, commission: u16) -> Self {
+ let mut rng = &mut rand::thread_rng();
let mut seed = [0u8; 32];
rng.fill_bytes(&mut seed);
+ let bls_key_pair = BLSKeyPair::generate(&mut rng);
+ let bls_sig = sign_address_bls(&bls_key_pair, account);
+ let schnorr_key_pair = StateKeyPair::generate_from_seed_indexed(seed, 0);
+ let schnorr_sig = sign_address_schnorr(&schnorr_key_pair, account);
+ Self {
+ account,
+ bls_vk: bls_key_pair.ver_key().to_affine().into(),
+ schnorr_vk: schnorr_key_pair.ver_key().to_affine().into(),
+ commission,
+ bls_sig,
+ schnorr_sig,
+ }
+ }
+ }
- let (bls_vk, _) = BLSPubKey::generated_from_seed_indexed(seed, 0);
- let schnorr_vk: EdOnBN254PointSol = StateKeyPair::generate_from_seed_indexed(seed, 0)
- .ver_key()
- .to_affine()
- .into();
+ impl From<&TestValidator> for ValidatorRegistered {
+ fn from(value: &TestValidator) -> Self {
+ Self {
+ account: value.account,
+ blsVk: value.bls_vk,
+ schnorrVk: value.schnorr_vk,
+ commission: value.commission,
+ }
+ }
+ }
+ impl From<&TestValidator> for ValidatorRegisteredV2 {
+ fn from(value: &TestValidator) -> Self {
Self {
- account: Address::random(),
- bls_vk: bls_vk.to_affine().into(),
- schnorr_vk,
- commission: rng.gen_range(0..10000),
+ account: value.account,
+ blsVK: value.bls_vk,
+ schnorrVK: value.schnorr_vk,
+ commission: value.commission,
+ blsSig: value.bls_sig.into(),
+ schnorrSig: value.schnorr_sig.clone(),
+ }
+ }
+ }
+
+ impl From<&TestValidator> for ConsensusKeysUpdated {
+ fn from(value: &TestValidator) -> Self {
+ Self {
+ account: value.account,
+ blsVK: value.bls_vk,
+ schnorrVK: value.schnorr_vk,
+ }
+ }
+ }
+
+ impl From<&TestValidator> for ConsensusKeysUpdatedV2 {
+ fn from(value: &TestValidator) -> Self {
+ Self {
+ account: value.account,
+ blsVK: value.bls_vk,
+ schnorrVK: value.schnorr_vk,
+ blsSig: value.bls_sig.into(),
+ schnorrSig: value.schnorr_sig.clone(),
+ }
+ }
+ }
+
+ impl From<&TestValidator> for ValidatorExit {
+ fn from(value: &TestValidator) -> Self {
+ Self {
+ validator: value.account,
}
}
}
@@ -1472,8 +1799,8 @@ pub mod testing {
validator_stake += alloy::primitives::U256::from(stake);
}
- let stake_table_key = val.bls_vk.clone().into();
- let state_ver_key = val.schnorr_vk.clone().into();
+ let stake_table_key = val.bls_vk.into();
+ let state_ver_key = val.schnorr_vk.into();
Validator {
account: val.account,
@@ -1489,7 +1816,8 @@ pub mod testing {
#[cfg(test)]
mod tests {
- use alloy::primitives::Address;
+ use alloy::{primitives::Address, rpc::types::Log};
+ use hotshot_contract_adapter::stake_table::StakeTableContractVersion;
use sequencer_utils::test_utils::setup_test;
use super::*;
@@ -1499,60 +1827,73 @@ mod tests {
fn test_from_l1_events() -> anyhow::Result<()> {
setup_test();
// Build a stake table with one DA node and one consensus node.
- let val = TestValidator::random();
- let val_new_keys = TestValidator::random();
+ let val_1 = TestValidator::random();
+ let val_1_new_keys = val_1.randomize_keys();
+ let val_2 = TestValidator::random();
+ let val_2_new_keys = val_2.randomize_keys();
let delegator = Address::random();
let mut events: Vec = [
- ValidatorRegistered {
- account: val.account,
- blsVk: val.bls_vk.clone(),
- schnorrVk: val.schnorr_vk.clone(),
- commission: val.commission,
- }
- .into(),
+ ValidatorRegistered::from(&val_1).into(),
+ ValidatorRegisteredV2::from(&val_2).into(),
Delegated {
delegator,
- validator: val.account,
+ validator: val_1.account,
amount: U256::from(10),
}
.into(),
- ConsensusKeysUpdated {
- account: val.account,
- blsVK: val_new_keys.bls_vk.clone(),
- schnorrVK: val_new_keys.schnorr_vk.clone(),
- }
- .into(),
+ ConsensusKeysUpdated::from(&val_1_new_keys).into(),
+ ConsensusKeysUpdatedV2::from(&val_2_new_keys).into(),
Undelegated {
delegator,
- validator: val.account,
+ validator: val_1.account,
amount: U256::from(7),
}
.into(),
// delegate to the same validator again
Delegated {
delegator,
- validator: val.account,
+ validator: val_1.account,
amount: U256::from(5),
}
.into(),
+ // delegate to the second validator
+ Delegated {
+ delegator: Address::random(),
+ validator: val_2.account,
+ amount: U256::from(3),
+ }
+ .into(),
]
.to_vec();
let st = active_validator_set_from_l1_events(events.iter().cloned())?;
- let st_val = st.get(&val.account).unwrap();
+ let st_val_1 = st.get(&val_1.account).unwrap();
// final staked amount should be 10 (delegated) - 7 (undelegated) + 5 (Delegated)
- assert_eq!(st_val.stake, U256::from(8));
- assert_eq!(st_val.commission, val.commission);
- assert_eq!(st_val.delegators.len(), 1);
+ assert_eq!(st_val_1.stake, U256::from(8));
+ assert_eq!(st_val_1.commission, val_1.commission);
+ assert_eq!(st_val_1.delegators.len(), 1);
// final delegated amount should be 10 (delegated) - 7 (undelegated) + 5 (Delegated)
- assert_eq!(*st_val.delegators.get(&delegator).unwrap(), U256::from(8));
+ assert_eq!(*st_val_1.delegators.get(&delegator).unwrap(), U256::from(8));
- events.push(
- ValidatorExit {
- validator: val.account,
- }
- .into(),
- );
+ let st_val_2 = st.get(&val_2.account).unwrap();
+ assert_eq!(st_val_2.stake, U256::from(3));
+ assert_eq!(st_val_2.commission, val_2.commission);
+ assert_eq!(st_val_2.delegators.len(), 1);
+
+ events.push(ValidatorExit::from(&val_1).into());
+
+ let st = active_validator_set_from_l1_events(events.iter().cloned())?;
+ // The first validator should have been removed
+ assert_eq!(st.get(&val_1.account), None);
+
+ // The second validator should be unchanged
+ let st_val_2 = st.get(&val_2.account).unwrap();
+ assert_eq!(st_val_2.stake, U256::from(3));
+ assert_eq!(st_val_2.commission, val_2.commission);
+ assert_eq!(st_val_2.delegators.len(), 1);
+
+ // remove the 2nd validator
+ events.push(ValidatorExit::from(&val_2).into());
// This should fail because the validator has exited and no longer exists in the stake table.
assert!(active_validator_set_from_l1_events(events.iter().cloned()).is_err());
@@ -1565,25 +1906,16 @@ mod tests {
let val = TestValidator::random();
let delegator = Address::random();
- let register: StakeTableEvent = ValidatorRegistered {
- account: val.account,
- blsVk: val.bls_vk.clone(),
- schnorrVk: val.schnorr_vk.clone(),
- commission: val.commission,
- }
- .into();
+ let register: StakeTableEvent = ValidatorRegistered::from(&val).into();
+ let register_v2: StakeTableEvent = ValidatorRegisteredV2::from(&val).into();
let delegate: StakeTableEvent = Delegated {
delegator,
validator: val.account,
amount: U256::from(10),
}
.into();
- let key_update: StakeTableEvent = ConsensusKeysUpdated {
- account: val.account,
- blsVK: val.bls_vk.clone(),
- schnorrVK: val.schnorr_vk.clone(),
- }
- .into();
+ let key_update: StakeTableEvent = ConsensusKeysUpdated::from(&val).into();
+ let key_update_v2: StakeTableEvent = ConsensusKeysUpdatedV2::from(&val).into();
let undelegate: StakeTableEvent = Undelegated {
delegator,
validator: val.account,
@@ -1591,25 +1923,35 @@ mod tests {
}
.into();
- let exit: StakeTableEvent = ValidatorExit {
- validator: val.account,
- }
- .into();
+ let exit: StakeTableEvent = ValidatorExit::from(&val).into();
let cases = [
vec![exit],
vec![undelegate.clone()],
vec![delegate.clone()],
vec![key_update],
+ vec![key_update_v2],
vec![register.clone(), register.clone()],
- vec![register, delegate, undelegate.clone(), undelegate],
+ vec![register_v2.clone(), register_v2.clone()],
+ vec![register.clone(), register_v2.clone()],
+ vec![register_v2.clone(), register.clone()],
+ vec![
+ register,
+ delegate.clone(),
+ undelegate.clone(),
+ undelegate.clone(),
+ ],
+ vec![register_v2, delegate, undelegate.clone(), undelegate],
];
for events in cases.iter() {
- let res = active_validator_set_from_l1_events(events.iter().cloned());
+ // NOTE: not selecting the active validator set because we care about wrong sequences of
+ // events being detected. If we compute the active set we will also get an error if the
+ // set is empty but that's not what we want to test here.
+ let res = validators_from_l1_events(events.iter().cloned());
assert!(
res.is_err(),
- "events {:?}, not a valid sequencer of events",
+ "events {:?}, not a valid sequence of events",
res
);
}
@@ -1654,4 +1996,49 @@ mod tests {
}
}
}
+
+ // For a bug where the GCL did not match the stake table contract implementation and allowed
+ // duplicated BLS keys via the update keys events.
+ #[rstest::rstest]
+ fn test_regression_non_unique_bls_keys_not_discarded(
+ #[values(StakeTableContractVersion::V1, StakeTableContractVersion::V2)]
+ version: StakeTableContractVersion,
+ ) {
+ let val = TestValidator::random();
+ let register: StakeTableEvent = match version {
+ StakeTableContractVersion::V1 => ValidatorRegistered::from(&val).into(),
+ StakeTableContractVersion::V2 => ValidatorRegisteredV2::from(&val).into(),
+ };
+ let delegate: StakeTableEvent = Delegated {
+ delegator: Address::random(),
+ validator: val.account,
+ amount: U256::from(10),
+ }
+ .into();
+
+ // first ensure that we can build a valid stake table
+ assert!(active_validator_set_from_l1_events(
+ vec![register.clone(), delegate.clone()].into_iter()
+ )
+ .is_ok());
+
+ // add the invalid key update (re-using the same consensus keys)
+ let key_update = ConsensusKeysUpdated::from(&val).into();
+ assert!(active_validator_set_from_l1_events(
+ vec![register, delegate, key_update].into_iter()
+ )
+ .unwrap_err()
+ .to_string()
+ .contains("bls key already used"));
+ }
+
+ #[test]
+ fn test_display_log() {
+ let serialized = r#"{"address":"0x0000000000000000000000000000000000000069","topics":["0x0000000000000000000000000000000000000000000000000000000000000069"],"data":"0x69","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000069","blockNumber":"0x69","blockTimestamp":"0x69","transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000069","transactionIndex":"0x69","logIndex":"0x70","removed":false}"#;
+ let log: Log = serde_json::from_str(serialized).unwrap();
+ assert_eq!(
+ log.display(),
+ "Log(block=105,index=112,transaction_hash=0x0000000000000000000000000000000000000000000000000000000000000069)"
+ )
+ }
}
diff --git a/types/src/v0/v0_3/stake_table.rs b/types/src/v0/v0_3/stake_table.rs
index 30fe269b02..46f1b36919 100644
--- a/types/src/v0/v0_3/stake_table.rs
+++ b/types/src/v0/v0_3/stake_table.rs
@@ -4,8 +4,9 @@ use alloy::primitives::{Address, U256};
use async_lock::Mutex;
use derive_more::derive::{From, Into};
use hotshot::types::{BLSPubKey, SignatureKey};
-use hotshot_contract_adapter::sol_types::StakeTable::{
- ConsensusKeysUpdated, Delegated, Undelegated, ValidatorExit, ValidatorRegistered,
+use hotshot_contract_adapter::sol_types::StakeTableV2::{
+ ConsensusKeysUpdated, ConsensusKeysUpdatedV2, Delegated, Undelegated, ValidatorExit,
+ ValidatorRegistered, ValidatorRegisteredV2,
};
use hotshot_types::{
data::EpochNumber, light_client::StateVerKey, network::PeerConfigKeys, PeerConfig,
@@ -95,8 +96,10 @@ pub type EventKey = (u64, u64);
#[derive(Clone, derive_more::From, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum StakeTableEvent {
Register(ValidatorRegistered),
+ RegisterV2(ValidatorRegisteredV2),
Deregister(ValidatorExit),
Delegate(Delegated),
Undelegate(Undelegated),
KeyUpdate(ConsensusKeysUpdated),
+ KeyUpdateV2(ConsensusKeysUpdatedV2),
}