diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 5ea77f284e2..b0b5c8a59dc 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -102,7 +102,7 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&server_url) .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?, - Timeouts::set_all(Duration::from_secs(env.eth2_config.spec.seconds_per_slot)), + Timeouts::set_all(env.eth2_config.spec.get_slot_duration()), ); let eth2_network_config = env @@ -230,7 +230,7 @@ async fn publish_voluntary_exit( loop { // Sleep for a slot duration and then check if voluntary exit was processed // by checking the validator status. - sleep(Duration::from_secs(spec.seconds_per_slot)).await; + sleep(spec.get_slot_duration()).await; let validator_data = get_validator_data(client, &keypair.pk).await?; match validator_data.status { @@ -251,7 +251,9 @@ async fn publish_voluntary_exit( eprintln!("Please keep your validator running till exit epoch"); eprintln!( "Exit epoch in approximately {} secs", - (exit_epoch - current_epoch) * spec.seconds_per_slot * E::slots_per_epoch() + (exit_epoch - current_epoch) + * spec.get_slot_duration().as_secs() + * E::slots_per_epoch() ); break; } @@ -350,7 +352,7 @@ fn get_current_epoch(genesis_time: u64, spec: &ChainSpec) -> Option< let slot_clock = SystemTimeSlotClock::new( spec.genesis_slot, Duration::from_secs(genesis_time), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) } diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index a462376cc03..edbdd6d4d9f 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -748,7 +748,8 @@ mod tests { 
.execution_block_generator() .move_to_terminal_block() .expect("should move to terminal block"); - let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + let timestamp = + harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); harness .execution_block_generator() .modify_last_block(|block| { diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 46ba14f596b..8ff8357754c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4630,7 +4630,8 @@ impl BeaconChain { // 1. It seems we have time to propagate and still receive the proposer boost. // 2. The current head block was seen late. // 3. The `get_proposer_head` conditions from fork choice pass. - let proposing_on_time = slot_delay < self.config.re_org_cutoff(self.spec.seconds_per_slot); + let proposing_on_time = + slot_delay < self.config.re_org_cutoff(self.spec.get_slot_duration()); if !proposing_on_time { debug!(reason = "not proposing on time", "Not attempting re-org"); return None; @@ -4920,7 +4921,7 @@ impl BeaconChain { .and_then(|slot_start| { let now = self.slot_clock.now_duration()?; let slot_delay = now.saturating_sub(slot_start); - Some(slot_delay <= self.config.re_org_cutoff(self.spec.seconds_per_slot)) + Some(slot_delay <= self.config.re_org_cutoff(self.spec.get_slot_duration())) }) .unwrap_or(false) } else { @@ -5035,9 +5036,13 @@ impl BeaconChain { .start_of(slot) .unwrap_or_else(|| Duration::from_secs(0)), ); - block_delays - .observed - .is_some_and(|delay| delay >= self.slot_clock.unagg_attestation_production_delay()) + block_delays.observed.is_some_and(|delay| { + delay + >= self + .spec + .get_unaggregated_attestation_due() + .unwrap_or_else(|_| Duration::from_secs(0)) + }) } /// Produce a block for some `slot` upon the given `state`. 
diff --git a/beacon_node/beacon_chain/src/bellatrix_readiness.rs b/beacon_node/beacon_chain/src/bellatrix_readiness.rs index 412870354b9..88ccc21b855 100644 --- a/beacon_node/beacon_chain/src/bellatrix_readiness.rs +++ b/beacon_node/beacon_chain/src/bellatrix_readiness.rs @@ -147,7 +147,7 @@ impl BeaconChain { if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); let bellatrix_readiness_preparation_slots = - BELLATRIX_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + BELLATRIX_READINESS_PREPARATION_SECONDS / self.spec.get_slot_duration().as_secs(); if self.execution_layer.is_some() { // The user has already configured an execution layer, start checking for readiness diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7dd4c88c513..690e5dba965 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -863,6 +863,7 @@ impl BeaconChain { .as_utf8_lossy(), &self.slot_clock, self.event_handler.as_ref(), + &self.spec, ); if is_epoch_transition || reorg_distance.is_some() { @@ -1329,6 +1330,7 @@ fn observe_head_block_delays( head_block_graffiti: String, slot_clock: &S, event_handler: Option<&ServerSentEventHandler>, + spec: &ChainSpec, ) { let Some(block_time_set_as_head) = slot_clock.now_duration() else { // Practically unreachable: the slot clock's time should not be before the UNIX epoch. @@ -1458,7 +1460,10 @@ fn observe_head_block_delays( // Determine whether the block has been set as head too late for proper attestation // production. 
- let late_head = attestable_delay >= slot_clock.unagg_attestation_production_delay(); + let late_head = attestable_delay + >= spec + .get_unaggregated_attestation_due() + .unwrap_or(block_delay_total); // If the block was enshrined as head too late for attestations to be created for it, // log a debug warning and increment a metric. diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 1f5abc4891b..711ffdc99c2 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -168,11 +168,9 @@ impl Default for ChainConfig { impl ChainConfig { /// The latest delay from the start of the slot at which to attempt a 1-slot re-org. - pub fn re_org_cutoff(&self, seconds_per_slot: u64) -> Duration { + pub fn re_org_cutoff(&self, slot_duration: Duration) -> Duration { self.re_org_cutoff_millis .map(Duration::from_millis) - .unwrap_or_else(|| { - Duration::from_secs(seconds_per_slot) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR - }) + .unwrap_or_else(|| slot_duration / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR) } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 3e859456b18..6e6d0024b3f 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -1178,7 +1178,7 @@ mod test { let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); let kzg = get_kzg(&spec); let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index 2dc4de7d04b..9cad6dc269a 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ 
b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -75,7 +75,10 @@ impl VerifiedLightClientFinalityUpdate { .slot_clock .start_of(rcv_finality_update.signature_slot()) .ok_or(Error::SigSlotStartIsNone)?; - let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + let one_third_slot_duration = chain + .spec + .get_sync_message_due() + .map_err(|_| Error::Ignore)?; if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() < start_time + one_third_slot_duration { diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index 4079a374f89..df9a95d2760 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -70,7 +70,12 @@ impl VerifiedLightClientOptimisticUpdate { .slot_clock .start_of(rcv_optimistic_update.signature_slot()) .ok_or(Error::SigSlotStartIsNone)?; - let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0); + + let one_third_slot_duration = chain + .spec + .get_sync_message_due() + .map_err(|_| Error::Ignore)?; + if seen_timestamp + chain.spec.maximum_gossip_clock_disparity() < start_time + one_third_slot_duration { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6d17d6d85c5..466d284ce7e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -503,21 +503,26 @@ where .expect("cannot recalculate fork times without spec"); mock.server.execution_block_generator().shanghai_time = spec.capella_fork_epoch.map(|epoch| { - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + genesis_time + + spec.get_slot_duration().as_secs() * E::slots_per_epoch() * epoch.as_u64() }); mock.server.execution_block_generator().cancun_time = 
spec.deneb_fork_epoch.map(|epoch| { - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + genesis_time + + spec.get_slot_duration().as_secs() * E::slots_per_epoch() * epoch.as_u64() }); mock.server.execution_block_generator().prague_time = spec.electra_fork_epoch.map(|epoch| { - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + genesis_time + + spec.get_slot_duration().as_secs() * E::slots_per_epoch() * epoch.as_u64() }); mock.server.execution_block_generator().osaka_time = spec.fulu_fork_epoch.map(|epoch| { - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + genesis_time + + spec.get_slot_duration().as_secs() * E::slots_per_epoch() * epoch.as_u64() }); mock.server.execution_block_generator().amsterdam_time = spec.gloas_fork_epoch.map(|epoch| { - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + genesis_time + + spec.get_slot_duration().as_secs() * E::slots_per_epoch() * epoch.as_u64() }); self @@ -562,7 +567,6 @@ where let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); let spec = self.spec.expect("cannot build without spec"); - let seconds_per_slot = spec.seconds_per_slot; let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); @@ -607,7 +611,7 @@ where builder.slot_clock(testing_slot_clock) } else if builder.get_slot_clock().is_none() { builder - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) + .testing_slot_clock(spec.get_slot_duration()) .expect("should configure testing slot clock") } else { builder @@ -634,19 +638,24 @@ pub fn mock_execution_layer_from_parts( task_executor: TaskExecutor, ) -> MockExecutionLayer { let shanghai_time = spec.capella_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + HARNESS_GENESIS_TIME + + (spec.get_slot_duration().as_secs()) * E::slots_per_epoch() * epoch.as_u64() }); let 
cancun_time = spec.deneb_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + HARNESS_GENESIS_TIME + + (spec.get_slot_duration().as_secs()) * E::slots_per_epoch() * epoch.as_u64() }); let prague_time = spec.electra_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + HARNESS_GENESIS_TIME + + (spec.get_slot_duration().as_secs()) * E::slots_per_epoch() * epoch.as_u64() }); let osaka_time = spec.fulu_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + HARNESS_GENESIS_TIME + + (spec.get_slot_duration().as_secs()) * E::slots_per_epoch() * epoch.as_u64() }); let amsterdam_time = spec.gloas_fork_epoch.map(|epoch| { - HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + HARNESS_GENESIS_TIME + + (spec.get_slot_duration().as_secs()) * E::slots_per_epoch() * epoch.as_u64() }); let kzg = get_kzg(&spec); diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs new file mode 100644 index 00000000000..719087f5780 --- /dev/null +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -0,0 +1,790 @@ +use parking_lot::RwLock; +use ssz_derive::{Decode, Encode}; +use std::marker::PhantomData; +use std::sync::OnceLock; +use std::{ + collections::{BTreeMap, HashMap}, + sync::atomic::{AtomicU64, Ordering}, +}; +use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; +use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; + +/// A delay before making the CGC change effective to the data availability checker. +const CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS: u64 = 30; + +/// Number of slots after which a validator's registration is removed if it has not re-registered. 
+const VALIDATOR_REGISTRATION_EXPIRY_SLOTS: Slot = Slot::new(256); + +type ValidatorsAndBalances = Vec<(usize, u64)>; +type SlotAndEffectiveBalance = (Slot, u64); + +/// This currently just registers increases in validator count. +/// Does not handle decreasing validator counts +#[derive(Default, Debug)] +struct ValidatorRegistrations { + /// Set of all validators that is registered to this node along with its effective balance + /// + /// Key is validator index and value is effective_balance. + validators: HashMap, + /// Maintains the validator custody requirement at a given epoch. + /// + /// Note: Only stores the epoch value when there's a change in custody requirement. + /// So if epoch 10 and 11 has the same custody requirement, only 10 is stored. + /// This map is never pruned, because currently we never decrease custody requirement, so this + /// map size is contained at 128. + epoch_validator_custody_requirements: BTreeMap, +} + +impl ValidatorRegistrations { + /// Returns the validator custody requirement at the latest epoch. + fn latest_validator_custody_requirement(&self) -> Option { + self.epoch_validator_custody_requirements + .last_key_value() + .map(|(_, v)| *v) + } + + /// Lookup the active custody requirement at the given epoch. + fn custody_requirement_at_epoch(&self, epoch: Epoch) -> Option { + self.epoch_validator_custody_requirements + .range(..=epoch) + .last() + .map(|(_, custody_count)| *custody_count) + } + + /// Register a new validator index and updates the list of validators if required. + /// Returns `Some((effective_epoch, new_cgc))` if the registration results in a CGC update. 
+ pub(crate) fn register_validators( + &mut self, + validators_and_balance: ValidatorsAndBalances, + current_slot: Slot, + spec: &ChainSpec, + ) -> Option<(Epoch, u64)> { + for (validator_index, effective_balance) in validators_and_balance { + self.validators + .insert(validator_index, (current_slot, effective_balance)); + } + + // Drop validators that haven't re-registered with the node for `VALIDATOR_REGISTRATION_EXPIRY_SLOTS`. + self.validators + .retain(|_, (slot, _)| *slot >= current_slot - VALIDATOR_REGISTRATION_EXPIRY_SLOTS); + + // Each `BALANCE_PER_ADDITIONAL_CUSTODY_GROUP` effectively contributes one unit of "weight". + let validator_custody_units = self.validators.values().map(|(_, eb)| eb).sum::() + / spec.balance_per_additional_custody_group; + let validator_custody_requirement = + get_validators_custody_requirement(validator_custody_units, spec); + + tracing::debug!( + validator_custody_units, + validator_custody_requirement, + "Registered validators" + ); + + // If registering the new validator increased the total validator "units", then + // add a new entry for the current epoch + if Some(validator_custody_requirement) > self.latest_validator_custody_requirement() { + // Apply the change from the next epoch after adding some delay buffer to ensure + // the node has enough time to subscribe to subnets etc, and to avoid having + // inconsistent column counts within an epoch. 
+ let effective_delay_slots = + CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS / spec.get_slot_duration().as_secs(); + let effective_epoch = + (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; + self.epoch_validator_custody_requirements + .entry(effective_epoch) + .and_modify(|old_custody| *old_custody = validator_custody_requirement) + .or_insert(validator_custody_requirement); + Some((effective_epoch, validator_custody_requirement)) + } else { + None + } + } +} + +/// Given the `validator_custody_units`, return the custody requirement based on +/// the spec parameters. +/// +/// Note: a `validator_custody_units` here represents the number of 32 eth effective_balance +/// equivalent to `BALANCE_PER_ADDITIONAL_CUSTODY_GROUP`. +/// +/// For e.g. a validator with eb 32 eth is 1 unit. +/// a validator with eb 65 eth is 65 // 32 = 2 units. +/// +/// See https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/validator.md#validator-custody +fn get_validators_custody_requirement(validator_custody_units: u64, spec: &ChainSpec) -> u64 { + std::cmp::min( + std::cmp::max(validator_custody_units, spec.validator_custody_requirement), + spec.number_of_custody_groups, + ) +} + +/// Contains all the information the node requires to calculate the +/// number of columns to be custodied when checking for DA. +#[derive(Debug)] +pub struct CustodyContext { + /// The Number of custody groups required based on the number of validators + /// that is attached to this node. + /// + /// This is the number that we use to compute the custody group count that + /// we require for data availability check, and we use to advertise to our peers in the metadata + /// and enr values. + validator_custody_count: AtomicU64, + /// Is the node run as a supernode based on current cli parameters. + current_is_supernode: bool, + /// The persisted value for `is_supernode` based on the previous run of this node. 
+ /// + /// Note: We require this value because if a user restarts the node with a higher cli custody + /// count value than in the previous run, then we should continue advertising the custody + /// count based on the old value than the new one since we haven't backfilled the required + /// columns. + persisted_is_supernode: bool, + /// Maintains all the validators that this node is connected to currently + validator_registrations: RwLock, + /// Stores an immutable, ordered list of all custody columns as determined by the node's NodeID + /// on startup. + all_custody_columns_ordered: OnceLock>, + _phantom_data: PhantomData, +} + +impl CustodyContext { + /// Create a new custody default custody context object when no persisted object + /// exists. + /// + /// The `is_supernode` value is based on current cli parameters. + pub fn new(is_supernode: bool) -> Self { + Self { + validator_custody_count: AtomicU64::new(0), + current_is_supernode: is_supernode, + persisted_is_supernode: is_supernode, + validator_registrations: Default::default(), + all_custody_columns_ordered: OnceLock::new(), + _phantom_data: PhantomData, + } + } + + pub fn new_from_persisted_custody_context( + ssz_context: CustodyContextSsz, + is_supernode: bool, + ) -> Self { + CustodyContext { + validator_custody_count: AtomicU64::new(ssz_context.validator_custody_at_head), + current_is_supernode: is_supernode, + persisted_is_supernode: ssz_context.persisted_is_supernode, + validator_registrations: RwLock::new(ValidatorRegistrations { + validators: Default::default(), + epoch_validator_custody_requirements: ssz_context + .epoch_validator_custody_requirements + .into_iter() + .collect(), + }), + all_custody_columns_ordered: OnceLock::new(), + _phantom_data: PhantomData, + } + } + + /// Initializes an ordered list of data columns based on provided custody groups. 
+ /// + /// # Arguments + /// * `all_custody_groups_ordered` - Vector of custody group indices to map to columns + /// * `spec` - Chain specification containing custody parameters + /// + /// # Returns + /// Ok(()) if initialization succeeds, Err with description string if it fails + pub fn init_ordered_data_columns_from_custody_groups( + &self, + all_custody_groups_ordered: Vec, + spec: &ChainSpec, + ) -> Result<(), String> { + let mut ordered_custody_columns = vec![]; + for custody_index in all_custody_groups_ordered { + let columns = compute_columns_for_custody_group::(custody_index, spec) + .map_err(|e| format!("Failed to compute columns for custody group {e:?}"))?; + ordered_custody_columns.extend(columns); + } + self.all_custody_columns_ordered + .set(ordered_custody_columns.into_boxed_slice()) + .map_err(|_| { + "Failed to initialise CustodyContext with computed custody columns".to_string() + }) + } + + /// Register a new validator index and updates the list of validators if required. + /// + /// Also modifies the internal structures if the validator custody has changed to + /// update the `custody_column_count`. + /// + /// Returns `Some` along with the updated custody group count if it has changed, otherwise returns `None`. 
+ pub fn register_validators( + &self, + validators_and_balance: ValidatorsAndBalances, + current_slot: Slot, + spec: &ChainSpec, + ) -> Option { + let Some((effective_epoch, new_validator_custody)) = self + .validator_registrations + .write() + .register_validators::(validators_and_balance, current_slot, spec) + else { + return None; + }; + + let current_cgc = self.custody_group_count_at_head(spec); + let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); + + if new_validator_custody != validator_custody_count_at_head { + tracing::debug!( + old_count = validator_custody_count_at_head, + new_count = new_validator_custody, + "Validator count at head updated" + ); + self.validator_custody_count + .store(new_validator_custody, Ordering::Relaxed); + + let updated_cgc = self.custody_group_count_at_head(spec); + // Send the message to network only if there are more columns subnets to subscribe to + if updated_cgc > current_cgc { + tracing::debug!( + old_cgc = current_cgc, + updated_cgc, + "Custody group count updated" + ); + return Some(CustodyCountChanged { + new_custody_group_count: updated_cgc, + sampling_count: self.num_of_custody_groups_to_sample(effective_epoch, spec), + effective_epoch, + }); + } + } + + None + } + + /// This function is used to determine the custody group count at head ONLY. + /// Do NOT use this directly for data availability check, use `self.sampling_size` instead as + /// CGC can change over epochs. 
+ pub fn custody_group_count_at_head(&self, spec: &ChainSpec) -> u64 { + if self.current_is_supernode { + return spec.number_of_custody_groups; + } + let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); + + // If there are no validators, return the minimum custody_requirement + if validator_custody_count_at_head > 0 { + validator_custody_count_at_head + } else { + spec.custody_requirement + } + } + + /// This function is used to determine the custody group count at a given epoch. + /// + /// This differs from the number of custody groups sampled per slot, as the spec requires a + /// minimum sampling size which may exceed the custody group count (CGC). + /// + /// See also: [`Self::num_of_custody_groups_to_sample`]. + fn custody_group_count_at_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { + if self.current_is_supernode { + spec.number_of_custody_groups + } else { + self.validator_registrations + .read() + .custody_requirement_at_epoch(epoch) + .unwrap_or(spec.custody_requirement) + } + } + + /// Returns the count of custody groups this node must _sample_ for a block at `epoch` to import. + pub fn num_of_custody_groups_to_sample(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { + let custody_group_count = self.custody_group_count_at_epoch(epoch, spec); + spec.sampling_size_custody_groups(custody_group_count) + .expect("should compute node sampling size from valid chain spec") + } + + /// Returns the count of columns this node must _sample_ for a block at `epoch` to import. + pub fn num_of_data_columns_to_sample(&self, epoch: Epoch, spec: &ChainSpec) -> usize { + let custody_group_count = self.custody_group_count_at_epoch(epoch, spec); + spec.sampling_size_columns::(custody_group_count) + .expect("should compute node sampling size from valid chain spec") + } + + /// Returns whether the node should attempt reconstruction at a given epoch. 
+ pub fn should_attempt_reconstruction(&self, epoch: Epoch, spec: &ChainSpec) -> bool { + let min_columns_for_reconstruction = E::number_of_columns() / 2; + // performing reconstruction is not necessary if sampling column count is exactly 50%, + // because the node doesn't need the remaining columns. + self.num_of_data_columns_to_sample(epoch, spec) > min_columns_for_reconstruction + } + + /// Returns the ordered list of column indices that should be sampled for data availability checking at the given epoch. + /// + /// # Parameters + /// * `epoch` - Epoch to determine sampling columns for + /// * `spec` - Chain specification containing sampling parameters + /// + /// # Returns + /// A slice of ordered column indices that should be sampled for this epoch based on the node's custody configuration + pub fn sampling_columns_for_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> &[ColumnIndex] { + let num_of_columns_to_sample = self.num_of_data_columns_to_sample(epoch, spec); + let all_columns_ordered = self + .all_custody_columns_ordered + .get() + .expect("all_custody_columns_ordered should be initialized"); + &all_columns_ordered[..num_of_columns_to_sample] + } + + /// Returns the ordered list of column indices that the node is assigned to custody + /// (and advertised to peers) at the given epoch. If epoch is `None`, this function + /// computes the custody columns at head. + /// + /// This method differs from [`self::sampling_columns_for_epoch`] which returns all sampling columns. + /// The columns returned by this method are either identical to or a subset of the sampling columns, + /// representing only those columns that this node is responsible for maintaining custody of. + /// + /// # Parameters + /// * `epoch_opt` - Optional epoch to determine custody columns for. 
+ /// + /// # Returns + /// A slice of ordered custody column indices for this epoch based on the node's custody configuration + pub fn custody_columns_for_epoch( + &self, + epoch_opt: Option, + spec: &ChainSpec, + ) -> &[ColumnIndex] { + let custody_group_count = if let Some(epoch) = epoch_opt { + self.custody_group_count_at_epoch(epoch, spec) as usize + } else { + self.custody_group_count_at_head(spec) as usize + }; + + let all_columns_ordered = self + .all_custody_columns_ordered + .get() + .expect("all_custody_columns_ordered should be initialized"); + &all_columns_ordered[..custody_group_count] + } +} + +/// The custody count changed because of a change in the +/// number of validators being managed. +pub struct CustodyCountChanged { + pub new_custody_group_count: u64, + pub sampling_count: u64, + pub effective_epoch: Epoch, +} + +/// The custody information that gets persisted across runs. +#[derive(Debug, Encode, Decode, Clone)] +pub struct CustodyContextSsz { + pub validator_custody_at_head: u64, + pub persisted_is_supernode: bool, + pub epoch_validator_custody_requirements: Vec<(Epoch, u64)>, +} + +impl From<&CustodyContext> for CustodyContextSsz { + fn from(context: &CustodyContext) -> Self { + CustodyContextSsz { + validator_custody_at_head: context.validator_custody_count.load(Ordering::Relaxed), + persisted_is_supernode: context.persisted_is_supernode, + epoch_validator_custody_requirements: context + .validator_registrations + .read() + .epoch_validator_custody_requirements + .iter() + .map(|(epoch, count)| (*epoch, *count)) + .collect(), + } + } +} + +#[cfg(test)] +mod tests { + use rand::rng; + use rand::seq::SliceRandom; + use types::MainnetEthSpec; + + use super::*; + + type E = MainnetEthSpec; + + #[test] + fn no_validators_supernode_default() { + let custody_context = CustodyContext::::new(true); + let spec = E::default_spec(); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.number_of_custody_groups + ); + assert_eq!( 
+ custody_context.num_of_custody_groups_to_sample(Epoch::new(0), &spec), + spec.number_of_custody_groups + ); + } + + #[test] + fn no_validators_fullnode_default() { + let custody_context = CustodyContext::::new(false); + let spec = E::default_spec(); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.custody_requirement, + "head custody count should be minimum spec custody requirement" + ); + assert_eq!( + custody_context.num_of_custody_groups_to_sample(Epoch::new(0), &spec), + spec.samples_per_slot + ); + } + + #[test] + fn register_single_validator_should_update_cgc() { + let custody_context = CustodyContext::::new(false); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + let min_val_custody_requirement = spec.validator_custody_requirement; + // One single node increases its balance over 3 epochs. + let validators_and_expected_cgc_change = vec![ + ( + vec![(0, bal_per_additional_group)], + Some(min_val_custody_requirement), + ), + // No CGC change at 8 custody units, as it's the minimum requirement + (vec![(0, 8 * bal_per_additional_group)], None), + (vec![(0, 10 * bal_per_additional_group)], Some(10)), + ]; + + register_validators_and_assert_cgc::( + &custody_context, + validators_and_expected_cgc_change, + &spec, + ); + } + + #[test] + fn register_multiple_validators_should_update_cgc() { + let custody_context = CustodyContext::::new(false); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + let min_val_custody_requirement = spec.validator_custody_requirement; + // Add 3 validators over 3 epochs. 
+ let validators_and_expected_cgc = vec![ + ( + vec![(0, bal_per_additional_group)], + Some(min_val_custody_requirement), + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + ], + // No CGC change at 8 custody units, as it's the minimum requirement + None, + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + (2, 2 * bal_per_additional_group), + ], + Some(10), + ), + ]; + + register_validators_and_assert_cgc::( + &custody_context, + validators_and_expected_cgc, + &spec, + ); + } + + #[test] + fn register_validators_should_not_update_cgc_for_supernode() { + let custody_context = CustodyContext::::new(true); + let spec = E::default_spec(); + let bal_per_additional_group = spec.balance_per_additional_custody_group; + + // Add 3 validators over 3 epochs. + let validators_and_expected_cgc = vec![ + (vec![(0, bal_per_additional_group)], None), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + ], + None, + ), + ( + vec![ + (0, bal_per_additional_group), + (1, 7 * bal_per_additional_group), + (2, 2 * bal_per_additional_group), + ], + None, + ), + ]; + + register_validators_and_assert_cgc::( + &custody_context, + validators_and_expected_cgc, + &spec, + ); + let current_epoch = Epoch::new(2); + assert_eq!( + custody_context.num_of_custody_groups_to_sample(current_epoch, &spec), + spec.number_of_custody_groups + ); + } + + #[test] + fn cgc_change_should_be_effective_to_sampling_after_delay() { + let custody_context = CustodyContext::::new(false); + let spec = E::default_spec(); + let current_slot = Slot::new(10); + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + let default_sampling_size = + custody_context.num_of_custody_groups_to_sample(current_epoch, &spec); + let validator_custody_units = 10; + + let _cgc_changed = custody_context.register_validators( + vec![( + 0, + validator_custody_units * spec.balance_per_additional_custody_group, + )], + 
current_slot,
+            &spec,
+        );
+
+        // CGC update is not applied for `current_epoch`.
+        assert_eq!(
+            custody_context.num_of_custody_groups_to_sample(current_epoch, &spec),
+            default_sampling_size
+        );
+        // CGC update is applied for the next epoch.
+        assert_eq!(
+            custody_context.num_of_custody_groups_to_sample(current_epoch + 1, &spec),
+            validator_custody_units
+        );
+    }
+
+    #[test]
+    fn validator_dropped_after_no_registrations_within_expiry_should_not_reduce_cgc() {
+        let custody_context = CustodyContext::<E>::new(false);
+        let spec = E::default_spec();
+        let current_slot = Slot::new(10);
+        let val_custody_units_1 = 10;
+        let val_custody_units_2 = 5;
+
+        // GIVEN val_1 and val_2 registered at `current_slot`
+        let _ = custody_context.register_validators(
+            vec![
+                (
+                    1,
+                    val_custody_units_1 * spec.balance_per_additional_custody_group,
+                ),
+                (
+                    2,
+                    val_custody_units_2 * spec.balance_per_additional_custody_group,
+                ),
+            ],
+            current_slot,
+            &spec,
+        );
+
+        // WHEN val_1 re-registered, but val_2 did not re-register after `VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1` slots
+        let cgc_changed_opt = custody_context.register_validators(
+            vec![(
+                1,
+                val_custody_units_1 * spec.balance_per_additional_custody_group,
+            )],
+            current_slot + VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1,
+            &spec,
+        );
+
+        // THEN the reduction from dropping val_2 balance should NOT result in a CGC reduction
+        assert!(cgc_changed_opt.is_none(), "CGC should remain unchanged");
+        assert_eq!(
+            custody_context.custody_group_count_at_head(&spec),
+            val_custody_units_1 + val_custody_units_2
+        )
+    }
+
+    #[test]
+    fn validator_dropped_after_no_registrations_within_expiry() {
+        let custody_context = CustodyContext::<E>::new(false);
+        let spec = E::default_spec();
+        let current_slot = Slot::new(10);
+        let val_custody_units_1 = 10;
+        let val_custody_units_2 = 5;
+        let val_custody_units_3 = 6;
+
+        // GIVEN val_1 and val_2 registered at `current_slot`
+        let _ = custody_context.register_validators(
+            vec![
+                (
+                    1,
+
val_custody_units_1 * spec.balance_per_additional_custody_group,
+                ),
+                (
+                    2,
+                    val_custody_units_2 * spec.balance_per_additional_custody_group,
+                ),
+            ],
+            current_slot,
+            &spec,
+        );
+
+        // WHEN val_1 and val_3 registered, but val_3 did not re-register after `VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1` slots
+        let cgc_changed = custody_context.register_validators(
+            vec![
+                (
+                    1,
+                    val_custody_units_1 * spec.balance_per_additional_custody_group,
+                ),
+                (
+                    3,
+                    val_custody_units_3 * spec.balance_per_additional_custody_group,
+                ),
+            ],
+            current_slot + VALIDATOR_REGISTRATION_EXPIRY_SLOTS + 1,
+            &spec,
+        );
+
+        // THEN CGC should increase, BUT val_2 balance should NOT be included in CGC
+        assert_eq!(
+            cgc_changed
+                .expect("CGC should change")
+                .new_custody_group_count,
+            val_custody_units_1 + val_custody_units_3
+        );
+    }
+
+    #[test]
+    fn should_init_ordered_data_columns_and_return_sampling_columns() {
+        let spec = E::default_spec();
+        let custody_context = CustodyContext::<E>::new(false);
+        let sampling_size = custody_context.num_of_data_columns_to_sample(Epoch::new(0), &spec);
+
+        // initialise ordered columns
+        let mut all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::<Vec<_>>();
+        all_custody_groups_ordered.shuffle(&mut rng());
+
+        custody_context
+            .init_ordered_data_columns_from_custody_groups(
+                all_custody_groups_ordered.clone(),
+                &spec,
+            )
+            .expect("should initialise ordered data columns");
+
+        let actual_sampling_columns =
+            custody_context.sampling_columns_for_epoch(Epoch::new(0), &spec);
+
+        let expected_sampling_columns = &all_custody_groups_ordered
+            .iter()
+            .flat_map(|custody_index| {
+                compute_columns_for_custody_group::<E>(*custody_index, &spec)
+                    .expect("should compute columns for custody group")
+            })
+            .collect::<Vec<_>>()[0..sampling_size];
+
+        assert_eq!(actual_sampling_columns, expected_sampling_columns)
+    }
+
+    /// Update the validator every epoch and assert cgc against expected values.
+    fn register_validators_and_assert_cgc<E: EthSpec>(
+        custody_context: &CustodyContext<E>,
+        validators_and_expected_cgc_changed: Vec<(ValidatorsAndBalances, Option<u64>)>,
+        spec: &ChainSpec,
+    ) {
+        for (idx, (validators_and_balance, expected_cgc_change)) in
+            validators_and_expected_cgc_changed.into_iter().enumerate()
+        {
+            let epoch = Epoch::new(idx as u64);
+            let updated_custody_count_opt = custody_context
+                .register_validators(
+                    validators_and_balance,
+                    epoch.start_slot(E::slots_per_epoch()),
+                    spec,
+                )
+                .map(|c| c.new_custody_group_count);
+
+            assert_eq!(updated_custody_count_opt, expected_cgc_change);
+        }
+    }
+
+    #[test]
+    fn custody_columns_for_epoch_no_validators_fullnode() {
+        let custody_context = CustodyContext::<E>::new(false);
+        let spec = E::default_spec();
+        let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::<Vec<_>>();
+
+        custody_context
+            .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec)
+            .expect("should initialise ordered data columns");
+
+        assert_eq!(
+            custody_context.custody_columns_for_epoch(None, &spec).len(),
+            spec.custody_requirement as usize
+        );
+    }
+
+    #[test]
+    fn custody_columns_for_epoch_no_validators_supernode() {
+        let custody_context = CustodyContext::<E>::new(true);
+        let spec = E::default_spec();
+        let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::<Vec<_>>();
+
+        custody_context
+            .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec)
+            .expect("should initialise ordered data columns");
+
+        assert_eq!(
+            custody_context.custody_columns_for_epoch(None, &spec).len(),
+            spec.number_of_custody_groups as usize
+        );
+    }
+
+    #[test]
+    fn custody_columns_for_epoch_with_validators_should_match_cgc() {
+        let custody_context = CustodyContext::<E>::new(false);
+        let spec = E::default_spec();
+        let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::<Vec<_>>();
+        let val_custody_units = 10;
+
+        custody_context
+
.init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec)
+            .expect("should initialise ordered data columns");
+
+        let _ = custody_context.register_validators(
+            vec![(
+                0,
+                val_custody_units * spec.balance_per_additional_custody_group,
+            )],
+            Slot::new(10),
+            &spec,
+        );
+
+        assert_eq!(
+            custody_context.custody_columns_for_epoch(None, &spec).len(),
+            val_custody_units as usize
+        );
+    }
+
+    #[test]
+    fn custody_columns_for_epoch_specific_epoch_uses_epoch_cgc() {
+        let custody_context = CustodyContext::<E>::new(false);
+        let spec = E::default_spec();
+        let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::<Vec<_>>();
+        let test_epoch = Epoch::new(5);
+
+        custody_context
+            .init_ordered_data_columns_from_custody_groups(all_custody_groups_ordered, &spec)
+            .expect("should initialise ordered data columns");
+
+        let expected_cgc = custody_context.custody_group_count_at_epoch(test_epoch, &spec);
+        assert_eq!(
+            custody_context
+                .custody_columns_for_epoch(Some(test_epoch), &spec)
+                .len(),
+            expected_cgc as usize
+        );
+    }
+}
diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs
index 2a76d65d328..dc467a0a6c4 100644
--- a/beacon_node/beacon_chain/src/validator_monitor.rs
+++ b/beacon_node/beacon_chain/src/validator_monitor.rs
@@ -1180,12 +1180,14 @@ impl<E: EthSpec> ValidatorMonitor<E> {
         seen_timestamp: Duration,
         indexed_attestation: &IndexedAttestation<E>,
         slot_clock: &S,
+        spec: &ChainSpec,
     ) {
         self.register_unaggregated_attestation(
             "gossip",
             seen_timestamp,
             indexed_attestation,
             slot_clock,
+            spec,
         )
     }
@@ -1195,12 +1197,14 @@ impl<E: EthSpec> ValidatorMonitor<E> {
         seen_timestamp: Duration,
         indexed_attestation: &IndexedAttestation<E>,
         slot_clock: &S,
+        spec: &ChainSpec,
     ) {
         self.register_unaggregated_attestation(
             "api",
             seen_timestamp,
             indexed_attestation,
             slot_clock,
+            spec,
         )
     }
@@ -1210,13 +1214,15 @@ impl<E: EthSpec> ValidatorMonitor<E> {
         seen_timestamp: Duration,
         indexed_attestation: &IndexedAttestation<E>,
         slot_clock: 
&S, + spec: &ChainSpec, ) { let data = indexed_attestation.data(); let epoch = data.slot.epoch(E::slots_per_epoch()); let delay = get_message_delay_ms( seen_timestamp, data.slot, - Duration::from_secs(0), + spec.get_unaggregated_attestation_due() + .unwrap_or(Duration::from_secs(0)), slot_clock, ); @@ -1263,6 +1269,7 @@ impl ValidatorMonitor { signed_aggregate_and_proof: &SignedAggregateAndProof, indexed_attestation: &IndexedAttestation, slot_clock: &S, + spec: &ChainSpec, ) { self.register_aggregated_attestation( "gossip", @@ -1270,6 +1277,7 @@ impl ValidatorMonitor { signed_aggregate_and_proof, indexed_attestation, slot_clock, + spec, ) } @@ -1280,6 +1288,7 @@ impl ValidatorMonitor { signed_aggregate_and_proof: &SignedAggregateAndProof, indexed_attestation: &IndexedAttestation, slot_clock: &S, + spec: &ChainSpec, ) { self.register_aggregated_attestation( "api", @@ -1287,6 +1296,7 @@ impl ValidatorMonitor { signed_aggregate_and_proof, indexed_attestation, slot_clock, + spec, ) } @@ -1297,13 +1307,15 @@ impl ValidatorMonitor { signed_aggregate_and_proof: &SignedAggregateAndProof, indexed_attestation: &IndexedAttestation, slot_clock: &S, + spec: &ChainSpec, ) { let data = indexed_attestation.data(); let epoch = data.slot.epoch(E::slots_per_epoch()); let delay = get_message_delay_ms( seen_timestamp, data.slot, - slot_clock.agg_attestation_production_delay(), + spec.get_aggregate_attestation_due() + .unwrap_or(Duration::from_secs(0)), slot_clock, ); @@ -1488,12 +1500,14 @@ impl ValidatorMonitor { seen_timestamp: Duration, sync_committee_message: &SyncCommitteeMessage, slot_clock: &S, + spec: &ChainSpec, ) { self.register_sync_committee_message( "gossip", seen_timestamp, sync_committee_message, slot_clock, + spec, ) } @@ -1503,12 +1517,14 @@ impl ValidatorMonitor { seen_timestamp: Duration, sync_committee_message: &SyncCommitteeMessage, slot_clock: &S, + spec: &ChainSpec, ) { self.register_sync_committee_message( "api", seen_timestamp, sync_committee_message, 
slot_clock, + spec, ) } @@ -1519,15 +1535,16 @@ impl ValidatorMonitor { seen_timestamp: Duration, sync_committee_message: &SyncCommitteeMessage, slot_clock: &S, + spec: &ChainSpec, ) { if let Some(validator) = self.get_validator(sync_committee_message.validator_index) { let id = &validator.id; - let epoch = sync_committee_message.slot.epoch(E::slots_per_epoch()); let delay = get_message_delay_ms( seen_timestamp, sync_committee_message.slot, - slot_clock.sync_committee_message_production_delay(), + spec.get_sync_message_due() + .unwrap_or(Duration::from_secs(0)), slot_clock, ); @@ -1568,6 +1585,7 @@ impl ValidatorMonitor { sync_contribution: &SignedContributionAndProof, participant_pubkeys: &[PublicKeyBytes], slot_clock: &S, + spec: &ChainSpec, ) { self.register_sync_committee_contribution( "gossip", @@ -1575,6 +1593,7 @@ impl ValidatorMonitor { sync_contribution, participant_pubkeys, slot_clock, + spec, ) } @@ -1585,6 +1604,7 @@ impl ValidatorMonitor { sync_contribution: &SignedContributionAndProof, participant_pubkeys: &[PublicKeyBytes], slot_clock: &S, + spec: &ChainSpec, ) { self.register_sync_committee_contribution( "api", @@ -1592,6 +1612,7 @@ impl ValidatorMonitor { sync_contribution, participant_pubkeys, slot_clock, + spec, ) } @@ -1603,6 +1624,7 @@ impl ValidatorMonitor { sync_contribution: &SignedContributionAndProof, participant_pubkeys: &[PublicKeyBytes], slot_clock: &S, + spec: &ChainSpec, ) { let slot = sync_contribution.message.contribution.slot; let epoch = slot.epoch(E::slots_per_epoch()); @@ -1610,7 +1632,8 @@ impl ValidatorMonitor { let delay = get_message_delay_ms( seen_timestamp, slot, - slot_clock.sync_committee_contribution_production_delay(), + spec.get_contribution_message_due() + .unwrap_or(Duration::from_secs(0)), slot_clock, ); diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5d466dd1d38..fc0f96ef887 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ 
b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -174,7 +174,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { .unwrap(); // Add a slot duration to get to the next slot - let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + let timestamp = harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); harness .execution_block_generator() diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index 2c2ba8e01a7..e8ab795366c 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -103,7 +103,7 @@ async fn base_altair_bellatrix_capella() { .unwrap(); // Add a slot duration to get to the next slot - let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + let timestamp = harness.get_timestamp_at_slot() + harness.spec.get_slot_duration().as_secs(); harness .execution_block_generator() .modify_last_block(|block| { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ba0621ae720..341e6de0b78 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3011,7 +3011,6 @@ async fn weak_subjectivity_sync_test( let temp2 = tempdir().unwrap(); let store = get_store(&temp2); let spec = test_spec::(); - let seconds_per_slot = spec.seconds_per_slot; let kzg = get_kzg(&spec); @@ -3025,7 +3024,7 @@ async fn weak_subjectivity_sync_test( let slot_clock = TestingSlotClock::new( Slot::new(0), Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), + spec.get_slot_duration(), ); slot_clock.set_slot(harness.get_current_slot().as_u64()); @@ -3881,8 +3880,6 @@ async fn revert_minority_fork_on_resume() { let mut spec2 = MinimalEthSpec::default_spec(); spec2.altair_fork_epoch = Some(fork_epoch); - let seconds_per_slot = spec1.seconds_per_slot; - let all_validators = 
(0..validator_count).collect::>(); // Chain with no fork epoch configured. @@ -4002,7 +3999,7 @@ async fn revert_minority_fork_on_resume() { builder = builder .resume_from_db() .unwrap() - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) + .testing_slot_clock(spec2.get_slot_duration()) .unwrap(); builder .get_slot_clock() diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c48021e45d4..161d52e2a7f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -315,7 +315,7 @@ where let deneb_time = genesis_time + (deneb_fork_epoch.as_u64() * E::slots_per_epoch() - * spec.seconds_per_slot); + * spec.get_slot_duration().as_secs()); // Shrink the blob availability window so users don't start // a sync right before blobs start to disappear from the P2P @@ -325,7 +325,7 @@ where .saturating_sub(BLOB_AVAILABILITY_REDUCTION_EPOCHS); let blob_availability_window = reduced_p2p_availability_epochs * E::slots_per_epoch() - * spec.seconds_per_slot; + * spec.get_slot_duration().as_secs(); if now > deneb_time + blob_availability_window { return Err( @@ -592,17 +592,17 @@ where .network_globals .clone() .ok_or("slot_notifier requires a libp2p network")?; - let seconds_per_slot = self + let slot_duration = self .chain_spec .as_ref() .ok_or("slot_notifier requires a chain spec")? 
- .seconds_per_slot; + .get_slot_duration(); spawn_notifier( context.executor, beacon_chain, network_globals, - seconds_per_slot, + slot_duration, ) .map_err(|e| format!("Unable to start slot notifier: {}", e))?; @@ -907,7 +907,7 @@ where let slot_clock = SystemTimeSlotClock::new( spec.genesis_slot, Duration::from_secs(genesis_time), - Duration::from_secs(spec.seconds_per_slot), + spec.get_slot_duration(), ); self.slot_clock = Some(slot_clock); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 52a3b92cb60..3f01622c352 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -44,10 +44,8 @@ pub fn spawn_notifier( executor: task_executor::TaskExecutor, beacon_chain: Arc>, network: Arc>, - seconds_per_slot: u64, + slot_duration: Duration, ) -> Result<(), String> { - let slot_duration = Duration::from_secs(seconds_per_slot); - let speedo = Mutex::new(Speedo::default()); // Keep track of sync state and reset the speedo on specific sync state changes. 
@@ -568,8 +566,8 @@ fn find_next_fork_to_prepare( // Find the first fork that is scheduled and close to happen if let Some(fork_epoch) = fork_epoch { let fork_slot = fork_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let preparation_slots = - FORK_READINESS_PREPARATION_SECONDS / beacon_chain.spec.seconds_per_slot; + let preparation_slots = FORK_READINESS_PREPARATION_SECONDS + / beacon_chain.spec.get_slot_duration().as_secs(); let in_fork_preparation_period = current_slot + preparation_slots > fork_slot; if in_fork_preparation_period { return Some(*fork); diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 884aa9bf47a..e52b2862ea9 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -860,7 +860,8 @@ impl MockBuilder { .data .genesis_time }; - let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + let timestamp = + (slots_since_genesis * self.spec.get_slot_duration().as_secs()) + genesis_time; let head_state: BeaconState = self .beacon_client diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 947edf56d95..50c0cbe6c62 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -96,6 +96,7 @@ fn verify_and_publish_attestation( seen_timestamp, verified_attestation.indexed_attestation(), &chain.slot_clock, + &chain.spec, ); let fc_result = chain.apply_attestation_to_fork_choice(&verified_attestation); diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b54c071eb80..800fa69390c 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -22,7 +22,6 @@ use lighthouse_network::PubsubMessage; use lighthouse_tracing::SPAN_PUBLISH_BLOCK; use 
network::NetworkMessage; use rand::prelude::SliceRandom; -use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; @@ -762,7 +761,10 @@ fn late_block_logging>( // // Check to see the thresholds are non-zero to avoid logging errors with small // slot times (e.g., during testing) - let too_late_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let too_late_threshold = chain + .spec + .get_unaggregated_attestation_due() + .unwrap_or(delay); let delayed_threshold = too_late_threshold / 2; if delay >= too_late_threshold { error!( diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index b9fa24ad6a4..856c7c50550 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -235,6 +235,7 @@ pub fn process_sync_committee_signatures( seen_timestamp, verified.sync_message(), &chain.slot_clock, + &chain.spec, ); verified_for_pool = Some(verified); @@ -376,6 +377,7 @@ pub fn process_signed_contribution_and_proofs( verified_contribution.aggregate(), verified_contribution.participant_pubkeys(), &chain.slot_clock, + &chain.spec, ); verified_contributions.push((index, verified_contribution)); diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 8baf7c52458..b1ab4c648a3 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -862,6 +862,7 @@ pub fn post_validator_aggregate_and_proofs( verified_aggregate.aggregate(), verified_aggregate.indexed_attestation(), &chain.slot_clock, + &chain.spec, ); verified_aggregates.push((index, verified_aggregate)); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ed7abead18a..0b27a761281 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -52,7 +52,7 @@ use types::{ type E = 
MainnetEthSpec; -const SECONDS_PER_SLOT: u64 = 12; +const SLOT_DURATION_MS: u64 = 12_000; const SLOTS_PER_EPOCH: u64 = 32; const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5 - 1; // Make `next_block` an epoch transition @@ -323,7 +323,7 @@ impl ApiTester { let client = BeaconNodeHttpClient::new( beacon_url, - Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), + Timeouts::set_all(Duration::from_millis(SLOT_DURATION_MS)), ); Self { @@ -411,7 +411,7 @@ impl ApiTester { listening_socket.port() )) .unwrap(), - Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), + Timeouts::set_all(Duration::from_millis(SLOT_DURATION_MS)), ); Self { diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 416ca73e08e..c33322bf431 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -443,7 +443,7 @@ pub fn gossipsub_config( network_load: u8, fork_context: Arc, gossipsub_config_params: GossipsubConfigParams, - seconds_per_slot: u64, + slot_duration: Duration, slots_per_epoch: u64, idontwant_message_size_threshold: usize, ) -> gossipsub::Config { @@ -487,7 +487,7 @@ pub fn gossipsub_config( // To accommodate the increase, we should increase the duplicate cache time to filter older seen messages. // 2 epochs is quite sane for pre-deneb network parameters as well. // Hence we keep the same parameters for pre-deneb networks as well to avoid switching at the fork. 
- let duplicate_cache_time = Duration::from_secs(slots_per_epoch * seconds_per_slot * 2); + let duplicate_cache_time = Duration::from_secs(slots_per_epoch * slot_duration.as_secs() * 2); gossipsub::ConfigBuilder::default() .max_transmit_size(gossipsub_config_params.gossipsub_max_transmit_size) diff --git a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs index 873d3f92525..2f3cb9c3f5b 100644 --- a/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs +++ b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs @@ -52,7 +52,7 @@ pub struct PeerScoreSettings { impl PeerScoreSettings { pub fn new(chain_spec: &ChainSpec, mesh_n: usize) -> PeerScoreSettings { - let slot = Duration::from_secs(chain_spec.seconds_per_slot); + let slot = Duration::from_millis(chain_spec.slot_duration_ms); let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64; let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE) * (BEACON_BLOCK_WEIGHT diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4eebda1decb..c34a26eb82f 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -232,7 +232,7 @@ impl Network { config.network_load, ctx.fork_context.clone(), gossipsub_config_params, - ctx.chain_spec.seconds_per_slot, + ctx.chain_spec.get_slot_duration(), E::slots_per_epoch(), config.idontwant_message_size_threshold, ); @@ -240,13 +240,12 @@ impl Network { let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); let gossip_cache = { - let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); - let half_epoch = std::time::Duration::from_secs( - ctx.chain_spec.seconds_per_slot * E::slots_per_epoch() / 2, + let half_epoch = 
std::time::Duration::from_millis( + (ctx.chain_spec.get_slot_duration().as_millis() as u64) * E::slots_per_epoch() / 2, ); GossipCache::builder() - .beacon_block_timeout(slot_duration) + .beacon_block_timeout(ctx.chain_spec.get_slot_duration()) .aggregates_timeout(half_epoch) .attestation_timeout(half_epoch) .voluntary_exit_timeout(half_epoch * 2) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index eb70147c6ef..c171de25119 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -332,6 +332,7 @@ impl NetworkBeaconProcessor { seen_timestamp, indexed_attestation, &self.chain.slot_clock, + &self.chain.spec, ); // If the attestation is still timely, propagate it. @@ -541,6 +542,7 @@ impl NetworkBeaconProcessor { aggregate, indexed_attestation, &self.chain.slot_clock, + &self.chain.spec, ); metrics::inc_counter( @@ -801,7 +803,13 @@ impl NetworkBeaconProcessor { Ok(gossip_verified_blob) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); - if delay >= self.chain.slot_clock.unagg_attestation_production_delay() { + if delay + >= self + .chain + .spec + .get_unaggregated_attestation_due() + .unwrap_or(delay) + { metrics::inc_counter(&metrics::BEACON_BLOB_GOSSIP_ARRIVED_LATE_TOTAL); debug!( block_root = ?gossip_verified_blob.block_root(), @@ -1229,7 +1237,13 @@ impl NetworkBeaconProcessor { let verified_block = match verification_result { Ok(verified_block) => { - if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { + if block_delay + >= self + .chain + .spec + .get_unaggregated_attestation_due() + .unwrap_or(block_delay) + { metrics::inc_counter(&metrics::BEACON_BLOCK_DELAY_GOSSIP_ARRIVED_LATE_TOTAL); debug!( block_root = ?verified_block.block_root, @@ -1906,6 +1920,7 @@ impl NetworkBeaconProcessor { seen_timestamp, 
sync_signature.sync_message(), &self.chain.slot_clock, + &self.chain.spec, ); metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_MESSAGE_VERIFIED_TOTAL); @@ -1968,6 +1983,7 @@ impl NetworkBeaconProcessor { sync_contribution.aggregate(), sync_contribution.participant_pubkeys(), &self.chain.slot_clock, + &self.chain.spec, ); metrics::inc_counter(&metrics::BEACON_PROCESSOR_SYNC_CONTRIBUTION_VERIFIED_TOTAL); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index e49ae134fe4..c49b76aa243 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -303,7 +303,7 @@ impl NetworkBeaconProcessor { && current_slot == slot { // Note: this metric is useful to gauge how long it takes to receive blobs requested - // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()` + // over rpc. Since we always send the request for block components at `get_unaggregated_attestation_due() / 2` // we can use that as a baseline to measure against. let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0869b442aec..b8958ba5e55 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -862,9 +862,9 @@ impl NetworkService { self.next_digest_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); // Set the next_unsubscribe delay. 
- let epoch_duration = - self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); - let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); + let unsubscribe_delay = Duration::from_secs( + UNSUBSCRIBE_DELAY_EPOCHS * self.beacon_chain.spec.get_slot_duration().as_secs(), + ); // Update the `next_topic_subscriptions` timer if the next change in the fork digest is known. self.next_topic_subscriptions = @@ -915,7 +915,7 @@ fn next_topic_subscriptions_delay( ) -> Option { if let Some((_, duration_to_epoch)) = beacon_chain.duration_to_next_digest() { let duration_to_subscription = duration_to_epoch.saturating_sub(Duration::from_secs( - beacon_chain.spec.seconds_per_slot * SUBSCRIBE_DELAY_SLOTS, + beacon_chain.spec.get_slot_duration().as_secs() * SUBSCRIBE_DELAY_SLOTS, )); if !duration_to_subscription.is_zero() { return Some(tokio::time::sleep(duration_to_subscription)); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 26dd3b6642e..1819491fbe6 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -767,7 +767,7 @@ pub fn get_config( clap_utils::parse_optional(cli_args, "prepare-payload-lookahead")? 
.map(Duration::from_millis) .unwrap_or_else(|| { - Duration::from_secs(spec.seconds_per_slot) + Duration::from_millis(spec.slot_duration_ms) / DEFAULT_PREPARE_PAYLOAD_LOOKAHEAD_FACTOR }); diff --git a/book/src/api_vc_endpoints.md b/book/src/api_vc_endpoints.md index d128b13b2f8..cc9dd362f80 100644 --- a/book/src/api_vc_endpoints.md +++ b/book/src/api_vc_endpoints.md @@ -249,6 +249,7 @@ Example Response Body "FULU_FORK_VERSION": "0x70000910", "FULU_FORK_EPOCH": "18446744073709551615", "SECONDS_PER_SLOT": "12", + "SLOT_DURATION_MS": "12000", "SECONDS_PER_ETH1_BLOCK": "12", "MIN_VALIDATOR_WITHDRAWABILITY_DELAY": "256", "SHARD_COMMITTEE_PERIOD": "256", diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 6bc41113d6f..668018e22cc 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -58,6 +58,8 @@ GLOAS_FORK_EPOCH: 18446744073709551615 # --------------------------------------------------------------- # 5 seconds SECONDS_PER_SLOT: 5 +# 5 seconds +SLOT_DURATION_MS: 5000 # 6 (estimate from xDai mainnet) SECONDS_PER_ETH1_BLOCK: 6 # 2**8 (= 256) epochs ~5.7 hours @@ -66,6 +68,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml 
b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index aa2dbb35d35..f5a4f1a3822 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -55,6 +55,8 @@ GLOAS_FORK_EPOCH: 18446744073709551615 # --------------------------------------------------------------- # 5 seconds SECONDS_PER_SLOT: 5 +# 5 seconds +SLOT_DURATION_MS: 5000 # 6 (estimate from Gnosis Chain) SECONDS_PER_ETH1_BLOCK: 6 # 2**8 (= 256) epochs ~8 hours @@ -63,6 +65,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index e51bc3f6473..abfab547b97 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -9,7 +9,6 @@ pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; -use types::consts::bellatrix::INTERVALS_PER_SLOT; /// A clock that reports the current slot. /// @@ -77,30 +76,6 @@ pub trait SlotClock: Send + Sync + Sized + Clone { .or_else(|| Some(self.genesis_slot())) } - /// Returns the delay between the start of the slot and when unaggregated attestations should be - /// produced. 
- fn unagg_attestation_production_delay(&self) -> Duration { - self.slot_duration() / INTERVALS_PER_SLOT as u32 - } - - /// Returns the delay between the start of the slot and when sync committee messages should be - /// produced. - fn sync_committee_message_production_delay(&self) -> Duration { - self.slot_duration() / INTERVALS_PER_SLOT as u32 - } - - /// Returns the delay between the start of the slot and when aggregated attestations should be - /// produced. - fn agg_attestation_production_delay(&self) -> Duration { - self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 - } - - /// Returns the delay between the start of the slot and when partially aggregated `SyncCommitteeContribution` should be - /// produced. - fn sync_committee_contribution_production_delay(&self) -> Duration { - self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 - } - /// Returns the `Duration` since the start of the current `Slot` at seconds precision. Useful in determining whether to apply proposer boosts. fn seconds_from_current_slot_start(&self) -> Option { self.now_duration() @@ -134,13 +109,4 @@ pub trait SlotClock: Send + Sync + Sized + Clone { slot_clock.set_current_time(freeze_at); slot_clock } - - /// Returns the delay between the start of the slot and when a request for block components - /// missed over gossip in the current slot should be made via RPC. - /// - /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be - /// changed in the future. 
- fn single_lookup_delay(&self) -> Duration { - self.unagg_attestation_production_delay() / 2 - } } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 9a8cae0c365..386cd9b1a46 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -21,7 +21,6 @@ use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, - consts::bellatrix::INTERVALS_PER_SLOT, }; #[derive(Debug)] @@ -77,6 +76,7 @@ pub enum Error { }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), ValidatorStatuses(BeaconStateError), + ChainSpecError(String), } impl From for Error { @@ -727,9 +727,12 @@ where })); } + let attestation_threshold = spec.get_unaggregated_attestation_due().map_err(|_| { + Error::ChainSpecError("Failed to get unaggregated attestation due duration".to_string()) + })?; + // Add proposer score boost if the block is timely. 
- let is_before_attesting_interval = - block_delay < Duration::from_secs(spec.seconds_per_slot / INTERVALS_PER_SLOT); + let is_before_attesting_interval = block_delay < attestation_threshold; let is_first_block = self.fc_store.proposer_boost_root().is_zero(); if current_slot == block.slot() && is_before_attesting_interval && is_first_block { diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 07149ff2ee8..7e244d134e7 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -510,7 +510,7 @@ pub fn compute_timestamp_at_slot( ) -> Result { let slots_since_genesis = block_slot.as_u64().safe_sub(spec.genesis_slot.as_u64())?; slots_since_genesis - .safe_mul(spec.seconds_per_slot) + .safe_mul(spec.get_slot_duration().as_secs()) .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index da3f9b90ccc..6f21421e1af 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -12,6 +12,7 @@ use ssz_types::{RuntimeVariableList, VariableList}; use tree_hash::TreeHash; use crate::{ + consts::bellatrix::BASIS_POINTS, core::{ APPLICATION_DOMAIN_BUILDER, Address, ApplicationDomain, EnrForkId, Epoch, EthSpec, EthSpecId, Hash256, MainnetEthSpec, Slot, Uint256, @@ -97,6 +98,7 @@ pub struct ChainSpec { * Time parameters */ pub genesis_delay: u64, + // TODO deprecate seconds_per_slot pub seconds_per_slot: u64, pub slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, @@ -896,6 +898,43 @@ impl ChainSpec { ) } + /// Get the duration into a slot in which an unaggregated attestation is due + pub fn get_unaggregated_attestation_due(&self) -> Result { + self.get_slot_component_duration(self.attestation_due_bps) + } + + /// Get the duration into a slot in which an aggregated 
attestation is due
+    pub fn get_aggregate_attestation_due(&self) -> Result<Duration, ArithError> {
+        self.get_slot_component_duration(self.aggregate_due_bps)
+    }
+
+    /// Get the duration into a slot in which a `SignedContributionAndProof` is due
+    pub fn get_contribution_message_due(&self) -> Result<Duration, ArithError> {
+        self.get_slot_component_duration(self.contribution_due_bps)
+    }
+
+    /// Get the duration into a slot in which a sync committee message is due
+    pub fn get_sync_message_due(&self) -> Result<Duration, ArithError> {
+        self.get_slot_component_duration(self.sync_message_due_bps)
+    }
+
+    /// Calculate the duration into a slot for a given slot component
+    fn get_slot_component_duration(
+        &self,
+        component_basis_points: u64,
+    ) -> Result<Duration, ArithError> {
+        Ok(Duration::from_millis(
+            component_basis_points
+                .safe_mul(self.slot_duration_ms)?
+                .safe_div(BASIS_POINTS)?,
+        ))
+    }
+
+    /// Get the duration of a slot
+    pub fn get_slot_duration(&self) -> Duration {
+        Duration::from_millis(self.slot_duration_ms)
+    }
+
     /// Returns the slot at which the proposer shuffling was decided.
     ///
     /// The block root at this slot can be used to key the proposer shuffling for the given epoch.
@@ -1238,6 +1277,7 @@ impl ChainSpec { shard_committee_period: 64, genesis_delay: 300, seconds_per_slot: 6, + slot_duration_ms: 6000, inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, @@ -1364,8 +1404,6 @@ impl ChainSpec { proposer_reorg_cutoff_bps: 1667, attestation_due_bps: 3333, aggregate_due_bps: 6667, - sync_message_due_bps: 3333, - contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1432,6 +1470,8 @@ impl ChainSpec { domain_contribution_and_proof: 9, altair_fork_version: [0x01, 0x00, 0x00, 0x64], altair_fork_epoch: Some(Epoch::new(512)), + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Bellatrix hard fork params @@ -1782,6 +1822,9 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + slot_duration_ms: Option>, #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -2247,6 +2290,9 @@ impl Config { .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, + slot_duration_ms: Some(MaybeQuoted { + value: spec.slot_duration_ms, + }), seconds_per_eth1_block: spec.seconds_per_eth1_block, min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay, shard_committee_period: spec.shard_committee_period, @@ -2338,6 +2384,7 @@ impl Config { gloas_fork_version, gloas_fork_epoch, seconds_per_slot, + slot_duration_ms, seconds_per_eth1_block, min_validator_withdrawability_delay, shard_committee_period, @@ -2411,6 +2458,9 @@ impl Config { gloas_fork_version, gloas_fork_epoch: gloas_fork_epoch.map(|q| q.value), seconds_per_slot, + slot_duration_ms: slot_duration_ms + .map(|q| q.value) + .unwrap_or_else(|| seconds_per_slot.saturating_mul(1000)), seconds_per_eth1_block, min_validator_withdrawability_delay, shard_committee_period, 
@@ -2670,6 +2720,7 @@ mod yaml_tests { GENESIS_FORK_VERSION: 0x10355025 GENESIS_DELAY: 60 SECONDS_PER_SLOT: 12 + SLOT_DURATION_MS: 12000 SECONDS_PER_ETH1_BLOCK: 12 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 @@ -2819,6 +2870,7 @@ mod yaml_tests { GENESIS_FORK_VERSION: 0x10355025 GENESIS_DELAY: 60 SECONDS_PER_SLOT: 12 + SLOT_DURATION_MS: 12000 SECONDS_PER_ETH1_BLOCK: 12 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 @@ -2931,6 +2983,7 @@ mod yaml_tests { SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 SECONDS_PER_SLOT: 12 + SLOT_DURATION_MS: 12000 SECONDS_PER_ETH1_BLOCK: 14 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 @@ -3099,4 +3152,66 @@ mod yaml_tests { ); } } + + #[test] + fn test_slot_component_duration_calculations() { + let spec = ChainSpec::mainnet(); + + // Test unaggregated attestation (3333 bps = 33.33% of 12s = 4s) + let unagg_due = spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(unagg_due, Duration::from_millis(3999)); // 12000 * 3333 / 10000 + + // Test aggregate attestation (6667 bps = 66.67% of 12s = 8s) + let agg_due = spec.get_aggregate_attestation_due().unwrap(); + assert_eq!(agg_due, Duration::from_millis(8000)); // 12000 * 6667 / 10000 + + // Test sync message (3333 bps = 33.33% of 12s = 4s) + let sync_msg_due = spec.get_sync_message_due().unwrap(); + assert_eq!(sync_msg_due, Duration::from_millis(3999)); // 12000 * 3333 / 10000 + + // Test contribution message (6667 bps = 66.67% of 12s = 8s) + let contribution_due = spec.get_contribution_message_due().unwrap(); + assert_eq!(contribution_due, Duration::from_millis(8000)); // 12000 * 6667 / 10000 + + // Test slot duration + let slot_duration = spec.get_slot_duration(); + assert_eq!(slot_duration, Duration::from_millis(12000)); + assert_eq!(slot_duration, Duration::from_secs(spec.seconds_per_slot)); + + // Test edge cases with custom spec + let mut custom_spec = spec.clone(); + + // 
Edge case: 0 bps should give 0 duration + custom_spec.attestation_due_bps = 0; + let zero_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(zero_due, Duration::from_millis(0)); + + // Edge case: 10000 bps (100%) should give full slot duration + custom_spec.attestation_due_bps = 10_000; + let full_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(full_due, Duration::from_millis(12000)); + + // Edge case: 5000 bps (50%) should give half slot duration + custom_spec.attestation_due_bps = 5_000; + let half_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(half_due, Duration::from_millis(6000)); + + // Test with different slot duration (Gnosis: 5s slots) + custom_spec.slot_duration_ms = 5000; + custom_spec.attestation_due_bps = 3333; + let gnosis_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(gnosis_due, Duration::from_millis(1666)); // 5000 * 3333 / 10000 + + // Test with very small slot duration + custom_spec.slot_duration_ms = 1000; // 1 second + custom_spec.attestation_due_bps = 3333; + let small_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(small_due, Duration::from_millis(333)); // 1000 * 3333 / 10000 + + // Test rounding behavior with non-divisible values + custom_spec.slot_duration_ms = 12000; + custom_spec.attestation_due_bps = 1; // 0.01% + let tiny_due = custom_spec.get_unaggregated_attestation_due().unwrap(); + assert_eq!(tiny_due, Duration::from_millis(1)); // 12000 * 1 / 10000 = 1.2 -> 1 + } } diff --git a/consensus/types/src/core/consts.rs b/consensus/types/src/core/consts.rs index b6d63c47a88..bd8d10d7700 100644 --- a/consensus/types/src/core/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -20,7 +20,7 @@ pub mod altair { pub const NUM_FLAG_INDICES: usize = 3; } pub mod bellatrix { - pub const INTERVALS_PER_SLOT: u64 = 3; + pub const BASIS_POINTS: u64 = 10_000; } pub mod deneb { pub use 
kzg::VERSIONED_HASH_VERSION_KZG; diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 24c4a67225b..881b35928fc 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -47,6 +47,8 @@ TRANSITION_TOTAL_DIFFICULTY: 4294967296 # --------------------------------------------------------------- # 12 seconds SECONDS_PER_SLOT: 12 +# 12 seconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 # 2**8 (= 256) epochs ~27 hours @@ -55,6 +57,18 @@ MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 207324ea33f..8d92af73064 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -2332,7 +2332,7 @@ fn enable_proposer_re_orgs_default() { DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, ); assert_eq!( - config.chain.re_org_cutoff(12), + config.chain.re_org_cutoff(Duration::from_secs(12)), Duration::from_secs(12) / DEFAULT_RE_ORG_CUTOFF_DENOMINATOR ); }); @@ -2384,7 +2384,10 @@ fn proposer_re_org_cutoff() { .flag("proposer-reorg-cutoff", Some("500")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.chain.re_org_cutoff(12), Duration::from_millis(500)) + assert_eq!( + config.chain.re_org_cutoff(Duration::from_secs(12)), + Duration::from_millis(500) + ) }); } diff --git 
a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index a048674e630..0c36e5c49cc 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -18,7 +18,7 @@ participants: count: 2 network_params: fulu_fork_epoch: 0 - seconds_per_slot: 6 + slot_duration_ms: 3000 snooper_enabled: false global_log_level: debug additional_services: diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 9009d49d581..e5ffee494fa 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -9,7 +9,7 @@ NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml BEHAVIOR=$1 ENCLAVE_NAME=local-testnet-$BEHAVIOR -SECONDS_PER_SLOT=$(yq eval ".network_params.seconds_per_slot" $NETWORK_PARAMS_FILE) +SLOT_DURATION_MS=$(yq eval ".network_params.slot_duration_ms" $NETWORK_PARAMS_FILE) KEYS_PER_NODE=$(yq eval ".network_params.num_validator_keys_per_node" $NETWORK_PARAMS_FILE) LH_IMAGE_NAME=$(yq eval ".participants[0].cl_image" $NETWORK_PARAMS_FILE) @@ -56,7 +56,7 @@ GENESIS_DELAY=`curl -s $BN1_HTTP_ADDRESS/eth/v1/config/spec | jq '.data.GENESIS_ CURRENT_TIME=`date +%s` # Note: doppelganger protection can only be started post epoch 0 echo "Waiting until next epoch before starting the next validator client..." -DELAY=$(( $SECONDS_PER_SLOT * 32 + $GENESIS_DELAY + $MIN_GENESIS_TIME - $CURRENT_TIME)) +DELAY=$((($SLOT_DURATION_MS / 1000) * 32 + $GENESIS_DELAY + $MIN_GENESIS_TIME - $CURRENT_TIME)) sleep $DELAY # Use BN2 for the next validator client @@ -98,7 +98,7 @@ EOF # Check if doppelganger VC has stopped and exited. Exit code 1 means the check timed out and VC is still running. 
check_exit_cmd="until [ \$(get_service_status $service_name) != 'RUNNING' ]; do sleep 1; done" - doppelganger_exit=$(run_command_without_exit "timeout $(( $SECONDS_PER_SLOT * 32 * 2 )) bash -c \"$check_exit_cmd\"") + doppelganger_exit=$(run_command_without_exit "timeout $((($SLOT_DURATION_MS / 1000) * 32 * 2 )) bash -c \"$check_exit_cmd\"") if [[ $doppelganger_exit -eq 1 ]]; then echo "Test failed: expected doppelganger but VC is still running. Check the logs for details." @@ -148,7 +148,7 @@ EOF # # See: https://lighthouse-book.sigmaprime.io/api_validator_inclusion.html echo "Waiting three epochs..." - sleep $(( $SECONDS_PER_SLOT * 32 * 3 )) + sleep $((($SLOT_DURATION_MS / 1000) * 32 * 3 )) # Get VC4 validator keys keys_path=$SCRIPT_DIR/$ENCLAVE_NAME/node_4/validators @@ -176,7 +176,7 @@ EOF # # See: https://lighthouse-book.sigmaprime.io/api_validator_inclusion.html echo "Waiting two more epochs..." - sleep $(( $SECONDS_PER_SLOT * 32 * 2 )) + sleep $(( $SLOT_DURATION_MS / 1000 * 32 * 2 )) for val in 0x*; do [[ -e $val ]] || continue is_attester=$(run_command_without_exit "curl -s $BN1_HTTP_ADDRESS/lighthouse/validator_inclusion/5/$val | jq | grep -q '\"is_previous_epoch_target_attester\": true'") diff --git a/scripts/tests/genesis-sync-config-electra.yaml b/scripts/tests/genesis-sync-config-electra.yaml index 1d1ed4d3152..0e41a5d1655 100644 --- a/scripts/tests/genesis-sync-config-electra.yaml +++ b/scripts/tests/genesis-sync-config-electra.yaml @@ -11,7 +11,7 @@ participants: cl_image: lighthouse:local validator_count: 0 network_params: - seconds_per_slot: 6 + slot_duration_ms: 6000 electra_fork_epoch: 0 fulu_fork_epoch: 100000 # a really big number so this test stays in electra preset: "minimal" diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml index 6d2c2647a90..5ca76a7736f 100644 --- a/scripts/tests/genesis-sync-config-fulu.yaml +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -20,7 +20,7 @@ participants: 
supernode: false validator_count: 0 network_params: - seconds_per_slot: 6 + slot_duration_ms: 6000 fulu_fork_epoch: 0 preset: "minimal" additional_services: diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml index 35916ac1e4e..30654deaae9 100644 --- a/scripts/tests/network_params.yaml +++ b/scripts/tests/network_params.yaml @@ -10,7 +10,7 @@ participants: count: 4 network_params: fulu_fork_epoch: 0 - seconds_per_slot: 3 + slot_duration_ms: 3000 num_validator_keys_per_node: 20 global_log_level: debug snooper_enabled: false diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 8e9d438a243..872cbd49677 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -471,7 +471,7 @@ impl Tester { let since_genesis = tick .checked_sub(genesis_time) .ok_or_else(|| Error::FailedToParseTest("tick is prior to genesis".into()))?; - let slots_since_genesis = since_genesis / self.spec.seconds_per_slot; + let slots_since_genesis = since_genesis / self.spec.get_slot_duration().as_secs(); Ok(self.spec.genesis_slot + slots_since_genesis) } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 23ec70ae5d8..d14afa1f4df 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -175,8 +175,8 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let latest_fork_version = spec.electra_fork_version; let latest_fork_start_epoch = ELECTRA_FORK_EPOCH; - spec.seconds_per_slot /= speed_up_factor; - spec.seconds_per_slot = max(1, spec.seconds_per_slot); + spec.slot_duration_ms /= speed_up_factor; + spec.slot_duration_ms = max(1_000, spec.slot_duration_ms); spec.genesis_delay = genesis_delay; spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; @@ -188,7 +188,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let spec = 
Arc::new(spec); env.eth2_config.spec = spec.clone(); - let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slot_duration = Duration::from_millis(spec.slot_duration_ms); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); let initial_validator_count = spec.min_genesis_active_validator_count as usize; diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 6e0db52d755..035610f2025 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -179,8 +179,8 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { let genesis_delay = GENESIS_DELAY; - spec.seconds_per_slot /= speed_up_factor; - spec.seconds_per_slot = max(1, spec.seconds_per_slot); + spec.slot_duration_ms /= speed_up_factor; + spec.slot_duration_ms = max(1000, spec.slot_duration_ms); spec.genesis_delay = genesis_delay; spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; @@ -193,7 +193,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { let spec = Arc::new(spec); env.eth2_config.spec = spec.clone(); - let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slot_duration = Duration::from_millis(spec.slot_duration_ms); let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); let disconnection_epoch = 1; diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index bd22a21511d..a7e47c5b67b 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -73,23 +73,33 @@ fn default_mock_execution_config( if let Some(capella_fork_epoch) = spec.capella_fork_epoch { mock_execution_config.shanghai_time = Some( genesis_time - + spec.seconds_per_slot * E::slots_per_epoch() * capella_fork_epoch.as_u64(), + + (spec.get_slot_duration().as_secs()) + * E::slots_per_epoch() + * capella_fork_epoch.as_u64(), ) } if let Some(deneb_fork_epoch) = 
spec.deneb_fork_epoch { mock_execution_config.cancun_time = Some( - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * deneb_fork_epoch.as_u64(), + genesis_time + + (spec.get_slot_duration().as_secs()) + * E::slots_per_epoch() + * deneb_fork_epoch.as_u64(), ) } if let Some(electra_fork_epoch) = spec.electra_fork_epoch { mock_execution_config.prague_time = Some( genesis_time - + spec.seconds_per_slot * E::slots_per_epoch() * electra_fork_epoch.as_u64(), + + (spec.get_slot_duration().as_secs()) + * E::slots_per_epoch() + * electra_fork_epoch.as_u64(), ) } if let Some(fulu_fork_epoch) = spec.fulu_fork_epoch { mock_execution_config.osaka_time = Some( - genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * fulu_fork_epoch.as_u64(), + genesis_time + + (spec.get_slot_duration().as_secs()) + * E::slots_per_epoch() + * fulu_fork_epoch.as_u64(), ) } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 2d75df2fa34..83ec344512d 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -476,9 +476,9 @@ impl BeaconNodeFallback { } let timeouts: Timeouts = if new_list.len() == 1 || use_long_timeouts { - Timeouts::set_all(Duration::from_secs(self.spec.seconds_per_slot)) + Timeouts::set_all(Duration::from_millis(self.spec.slot_duration_ms)) } else { - Timeouts::use_optimized_timeouts(Duration::from_secs(self.spec.seconds_per_slot)) + Timeouts::use_optimized_timeouts(Duration::from_millis(self.spec.slot_duration_ms)) }; let new_candidates: Vec = new_list diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 23541cf6e28..4f547ab0455 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -270,7 +270,7 @@ impl ProductionValidatorClient { let beacon_node_setup = |x: (usize, &SensitiveUrl)| { let i = x.0; let url = x.1; - let slot_duration = 
Duration::from_secs(context.eth2_config.spec.seconds_per_slot); + let slot_duration = Duration::from_millis(context.eth2_config.spec.slot_duration_ms); let mut beacon_node_http_client_builder = ClientBuilder::new(); @@ -380,7 +380,7 @@ impl ProductionValidatorClient { let slot_clock = SystemTimeSlotClock::new( context.eth2_config.spec.genesis_slot, Duration::from_secs(genesis_time), - Duration::from_secs(context.eth2_config.spec.seconds_per_slot), + Duration::from_millis(context.eth2_config.spec.slot_duration_ms), ); beacon_nodes.set_slot_clock(slot_clock.clone()); diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index 587d4668b8a..54fc3f3c155 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -144,7 +144,7 @@ impl AttestationService AttestationService AttestationService( executor: TaskExecutor, spec: &ChainSpec, ) -> Result<(), String> { - let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slot_duration = Duration::from_millis(spec.slot_duration_ms); let interval_fut = async move { loop { diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index 063b11512f9..739b2886a39 100644 --- a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -174,7 +174,7 @@ impl PreparationService Result<(), String> { - let slot_duration = Duration::from_secs(spec.seconds_per_slot); + let slot_duration = Duration::from_millis(spec.slot_duration_ms); info!("Proposer preparation service started"); let executor = self.executor.clone(); @@ -214,7 +214,7 @@ impl PreparationService SyncCommitteeService SyncCommitteeService SyncCommitteeService(genesis_time: u64, spec: &ChainSpec) -> Opt let slot_clock = 
SystemTimeSlotClock::new( spec.genesis_slot, Duration::from_secs(genesis_time), - Duration::from_secs(spec.seconds_per_slot), + Duration::from_millis(spec.slot_duration_ms), ); slot_clock.now().map(|s| s.epoch(E::slots_per_epoch())) } diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index f7a09f8d8e7..7cc31d1b7ad 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -185,7 +185,9 @@ async fn run(config: ListConfig) -> Result {