diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index e3de8d73245..fa52f109720 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -413,9 +413,9 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// Maintains a record of which validators have proposed blocks for each slot.
     pub observed_block_producers: RwLock<ObservedBlockProducers<T::EthSpec>>,
     /// Maintains a record of blob sidecars seen over the gossip network.
-    pub observed_blob_sidecars: RwLock<ObservedDataSidecars<BlobSidecar<T::EthSpec>>>,
+    pub observed_blob_sidecars: RwLock<ObservedDataSidecars<BlobSidecar<T::EthSpec>, T::EthSpec>>,
     /// Maintains a record of column sidecars seen over the gossip network.
-    pub observed_column_sidecars: RwLock<ObservedDataSidecars<DataColumnSidecar<T::EthSpec>>>,
+    pub observed_column_sidecars: RwLock<ObservedDataSidecars<DataColumnSidecar<T::EthSpec>, T::EthSpec>>,
     /// Maintains a record of slashable message seen over the gossip network or RPC.
     pub observed_slashable: RwLock<ObservedSlashable<T::EthSpec>>,
     /// Maintains a record of which validators have submitted voluntary exits.
@@ -1131,13 +1131,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .or_else(|| self.early_attester_cache.get_data_columns(block_root));

         if let Some(mut all_cached_columns) = all_cached_columns_opt {
-            all_cached_columns.retain(|col| indices.contains(&col.index));
+            all_cached_columns.retain(|col| indices.contains(col.index()));
             Ok(all_cached_columns)
-        } else {
+        } else if let Some(block) = self.get_blinded_block(&block_root)? {
             indices
                 .iter()
-                .filter_map(|index| self.get_data_column(&block_root, index).transpose())
+                .filter_map(|index| {
+                    self.get_data_column(&block_root, index, block.fork_name_unchecked())
+                        .transpose()
+                })
                 .collect::<Result<_, _>>()
+        } else {
+            Ok(vec![])
         }
     }

@@ -1222,8 +1227,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn get_data_columns(
         &self,
         block_root: &Hash256,
+        fork_name: ForkName,
     ) -> Result<Option<DataColumnSidecarList<T::EthSpec>>, Error> {
-        self.store.get_data_columns(block_root).map_err(Error::from)
+        self.store
+            .get_data_columns(block_root, fork_name)
+            .map_err(Error::from)
     }

     /// Returns the blobs at the given root, if any.
@@ -1244,7 +1252,8 @@
         };

         if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
-            if let Some(columns) = self.store.get_data_columns(block_root)? {
+            let fork_name = self.spec.fork_name_at_epoch(block.epoch());
+            if let Some(columns) = self.store.get_data_columns(block_root, fork_name)? {
                 let num_required_columns = T::EthSpec::number_of_columns() / 2;
                 let reconstruction_possible = columns.len() >= num_required_columns;
                 if reconstruction_possible {
@@ -1260,7 +1269,7 @@
                 Ok(None)
             }
         } else {
-            self.get_blobs(block_root).map(|b| b.blobs())
+            Ok(self.get_blobs(block_root)?.blobs())
         }
     }

@@ -1272,8 +1281,11 @@
         &self,
         block_root: &Hash256,
         column_index: &ColumnIndex,
+        fork_name: ForkName,
     ) -> Result<Option<Arc<DataColumnSidecar<T::EthSpec>>>, Error> {
-        Ok(self.store.get_data_column(block_root, column_index)?)
+        Ok(self
+            .store
+            .get_data_column(block_root, column_index, fork_name)?)
     }

     pub fn get_blinded_block(
@@ -3183,7 +3195,7 @@
                     .cached_data_column_indexes(block_root)
                     .unwrap_or_default();
                 let new_data_columns =
-                    data_columns_iter.filter(|b| !imported_data_columns.contains(&b.index));
+                    data_columns_iter.filter(|b| !imported_data_columns.contains(b.index()));

                 for data_column in new_data_columns {
                     event_handler.register(EventKind::DataColumnSidecar(
@@ -3223,7 +3235,14 @@
         // Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data
        // into the da_checker, where invalid = descendant of invalid blocks.
        // Note: custody_columns should have at least one item and all items have the same parent root.
-        if let Some(parent_root) = custody_columns.iter().map(|c| c.block_parent_root()).next()
+        // TODO(gloas) ensure this check is no longer relevant post gloas
+        if let Some(parent_root) = custody_columns
+            .iter()
+            .filter_map(|c| match c.as_ref() {
+                DataColumnSidecar::Fulu(column) => Some(column.block_parent_root()),
+                _ => None,
+            })
+            .next()
             && !self
                 .canonical_head
                 .fork_choice_read_lock()
@@ -3543,8 +3562,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         publish_fn: impl FnOnce() -> Result<(), BlockError>,
     ) -> Result<AvailabilityProcessingStatus, BlockError> {
         if let Some(slasher) = self.slasher.as_ref() {
-            for data_colum in &data_columns {
-                slasher.accept_block_header(data_colum.signed_block_header());
+            for data_column in &data_columns {
+                // TODO(gloas) no block header post-gloas, what should we do here?
+                if let DataColumnSidecar::Fulu(c) = data_column.as_data_column() {
+                    slasher.accept_block_header(c.signed_block_header.clone());
+                }
             }
         }

@@ -3622,9 +3644,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     .put_kzg_verified_blobs(block_root, blobs)?
             }
             EngineGetBlobsOutput::CustodyColumns(data_columns) => {
+                // TODO(gloas) verify that this check is no longer relevant for gloas
                 self.check_data_column_sidecar_header_signature_and_slashability(
                     block_root,
-                    data_columns.iter().map(|c| c.as_data_column()),
+                    data_columns
+                        .iter()
+                        .filter_map(|c| match c.as_data_column() {
+                            DataColumnSidecar::Fulu(column) => Some(column),
+                            _ => None,
+                        }),
                 )?;
                 self.data_availability_checker
                     .put_kzg_verified_custody_data_columns(block_root, data_columns)?
@@ -3643,9 +3671,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         block_root: Hash256,
         custody_columns: DataColumnSidecarList<T::EthSpec>,
     ) -> Result<AvailabilityProcessingStatus, BlockError> {
+        // TODO(gloas) ensure that this check is no longer relevant post gloas
         self.check_data_column_sidecar_header_signature_and_slashability(
             block_root,
-            custody_columns.iter().map(|c| c.as_ref()),
+            custody_columns.iter().filter_map(|c| match c.as_ref() {
+                DataColumnSidecar::Fulu(fulu) => Some(fulu),
+                _ => None,
+            }),
         )?;

         // This slot value is purely informative for the consumers of
@@ -3663,7 +3695,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     fn check_data_column_sidecar_header_signature_and_slashability<'a>(
         self: &Arc<Self>,
         block_root: Hash256,
-        custody_columns: impl IntoIterator<Item = &'a DataColumnSidecar<T::EthSpec>>,
+        custody_columns: impl IntoIterator<Item = &'a DataColumnSidecarFulu<T::EthSpec>>,
     ) -> Result<(), BlockError> {
         let mut slashable_cache = self.observed_slashable.write();
         // Process all unique block headers - previous logic assumed all headers were identical and
@@ -7366,7 +7398,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             // Supernodes need to persist all sampled custody columns
             if columns_to_custody.len() != self.spec.number_of_custody_groups as usize {
                 data_columns
-                    .retain(|data_column| columns_to_custody.contains(&data_column.index));
+                    .retain(|data_column| columns_to_custody.contains(data_column.index()));
             }
             debug!(
                 %block_root,
@@ -7379,7 +7411,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     }

     /// Retrieves block roots (in ascending slot order) within some slot range from fork choice.
-    pub fn block_roots_from_fork_choice(&self, start_slot: u64, count: u64) -> Vec<Hash256> {
+    pub fn block_roots_from_fork_choice(
+        &self,
+        start_slot: u64,
+        count: u64,
+    ) -> Vec<(Hash256, Slot)> {
         let head_block_root = self.canonical_head.cached_head().head_block_root();
         let fork_choice_read_lock = self.canonical_head.fork_choice_read_lock();
         let block_roots_iter = fork_choice_read_lock
@@ -7390,7 +7426,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         for (root, slot) in block_roots_iter {
             if slot < end_slot && slot >= start_slot {
-                roots.push(root);
+                roots.push((root, slot));
             }
             if slot < start_slot {
                 break;
diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs
index 45374f509b2..11529c0018b 100644
--- a/beacon_node/beacon_chain/src/blob_verification.rs
+++ b/beacon_node/beacon_chain/src/blob_verification.rs
@@ -8,7 +8,7 @@ use crate::block_verification::{
     BlockSlashInfo, get_validator_pubkey_cache, process_block_slash_info,
 };
 use crate::kzg_utils::{validate_blob, validate_blobs};
-use crate::observed_data_sidecars::{ObservationStrategy, Observe};
+use crate::observed_data_sidecars::{Error as ObservedDataSidecarsError, ObservationStrategy, Observe};
 use crate::{BeaconChainError, metrics};
 use kzg::{Error as KzgError, Kzg, KzgCommitment};
 use ssz_derive::{Decode, Encode};
@@ -451,7 +451,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes, O: ObservationStrategy>(
         .observed_blob_sidecars
         .write()
         .observe_sidecar(blob_sidecar)
-        .map_err(|e| GossipBlobError::BeaconChainError(Box::new(e.into())))?
+        .map_err(|e: ObservedDataSidecarsError| GossipBlobError::BeaconChainError(Box::new(e.into())))?
     {
         return Err(GossipBlobError::RepeatBlob {
             proposer: blob_sidecar.block_proposer_index(),
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs
index dc38fc1c292..5dbe662b9bf 100644
--- a/beacon_node/beacon_chain/src/builder.rs
+++ b/beacon_node/beacon_chain/src/builder.rs
@@ -9,7 +9,7 @@ use crate::data_availability_checker::DataAvailabilityChecker;
 use crate::fork_choice_signal::ForkChoiceSignalTx;
 use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
 use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin};
-use crate::kzg_utils::build_data_column_sidecars;
+use crate::kzg_utils::{build_data_column_sidecars_fulu, build_data_column_sidecars_gloas};
 use crate::light_client_server_cache::LightClientServerCache;
 use crate::migrate::{BackgroundMigrator, MigratorConfig};
 use crate::observed_data_sidecars::ObservedDataSidecars;
@@ -42,6 +42,7 @@
 use std::time::Duration;
 use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
 use task_executor::{ShutdownReason, TaskExecutor};
 use tracing::{debug, error, info};
+use tree_hash::TreeHash;
 use types::data::CustodyIndex;
 use types::{
     BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList,
@@ -1213,17 +1214,30 @@ fn build_data_columns_from_blobs<E: EthSpec>(
             .blob_kzg_commitments()
             .cloned()
             .map_err(|e| format!("Unexpected pre Deneb block: {e:?}"))?;
-        let kzg_commitments_inclusion_proof = beacon_block_body
-            .kzg_commitments_merkle_proof()
-            .map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?;
-        build_data_column_sidecars(
-            kzg_commitments,
-            kzg_commitments_inclusion_proof,
-            block.signed_block_header(),
-            blob_cells_and_proofs_vec,
-            spec,
-        )
-        .map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?
+
+        if block.fork_name_unchecked().gloas_enabled() {
+            build_data_column_sidecars_gloas(
+                kzg_commitments,
+                block.message().tree_hash_root(),
+                block.slot(),
+                blob_cells_and_proofs_vec,
+                spec,
+            )
+            .map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?
+        } else {
+            let kzg_commitments_inclusion_proof = beacon_block_body
+                .kzg_commitments_merkle_proof()
+                .map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?;
+
+            build_data_column_sidecars_fulu(
+                kzg_commitments,
+                kzg_commitments_inclusion_proof,
+                block.signed_block_header(),
+                blob_cells_and_proofs_vec,
+                spec,
+            )
+            .map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?
+        }
     };
     Ok(data_columns)
 }
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index 7aec24b8e52..05ef220b841 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -19,10 +19,10 @@
 use std::sync::Arc;
 use std::time::Duration;
 use task_executor::TaskExecutor;
 use tracing::{debug, error, instrument};
-use types::data::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
+use types::data::{BlobIdentifier, FixedBlobSidecarList};
 use types::{
-    BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch,
-    EthSpec, Hash256, SignedBeaconBlock, Slot,
+    BlobSidecar, BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar,
+    DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot,
 };

 mod error;
@@ -187,7 +187,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         self.availability_cache
             .peek_pending_components(block_root, |components| {
                 components.is_some_and(|components| {
-                    let cached_column_opt = components.get_cached_data_column(data_column.index);
+                    let cached_column_opt = components.get_cached_data_column(*data_column.index());
                     cached_column_opt.is_some_and(|cached| *cached == *data_column)
                 })
             })
@@ -877,7 +877,9 @@ mod test {
     use std::time::Duration;
     use store::HotColdDB;
     use types::data::DataColumn;
-    use types::{ChainSpec, ColumnIndex, EthSpec, ForkName, MainnetEthSpec, Slot};
+    use types::{
+        ChainSpec, ColumnIndex, DataColumnSidecarFulu, EthSpec, ForkName, MainnetEthSpec, Slot,
+    };

     type E = MainnetEthSpec;
     type T = EphemeralHarnessType<E>;
@@ -932,7 +934,7 @@
                 cgc_change_slot,
                 data_columns
                     .into_iter()
-                    .filter(|d| requested_columns.contains(&d.index))
+                    .filter(|d| requested_columns.contains(d.index()))
                     .collect(),
             )
             .expect("should put rpc custody columns");
@@ -1007,7 +1009,7 @@
         let requested_columns = &custody_columns[..10];
         let gossip_columns = data_columns
             .into_iter()
-            .filter(|d| requested_columns.contains(&d.index))
+            .filter(|d| requested_columns.contains(d.index()))
             .map(GossipVerifiedDataColumn::<T>::__new_for_testing)
             .collect::<Vec<_>>();
         da_checker
@@ -1039,7 +1041,7 @@
     /// Regression test for KZG verification truncation bug (https://github.com/sigp/lighthouse/pull/7927)
     #[test]
-    fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns() {
+    fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns_fulu() {
         let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec()));
         let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
         let da_checker = new_da_checker(spec.clone());
@@ -1065,10 +1067,17 @@
             data_columns
                 .into_iter()
                 .map(|d| {
-                    let invalid_sidecar = DataColumnSidecar {
+                    let invalid_sidecar =
+                        DataColumnSidecar::Fulu(DataColumnSidecarFulu {
                             column: DataColumn::<E>::empty(),
-                        ..d.as_ref().clone()
-                    };
+                            index: *d.index(),
+                            kzg_commitments: d.kzg_commitments().clone(),
+                            kzg_proofs: d.kzg_proofs().clone(),
+                            signed_block_header: d.signed_block_header().unwrap().clone(),
+                            kzg_commitments_inclusion_proof: d
+                                .kzg_commitments_inclusion_proof()
+                                .unwrap()
+                                .clone(),
+                        });
                     CustodyDataColumn::from_asserted_custody(Arc::new(invalid_sidecar))
                 })
                 .collect::<Vec<_>>()
@@ -1126,7 +1135,7 @@
         let custody_columns = custody_context.custody_columns_for_epoch(None, &spec);
         let custody_columns = custody_columns
             .iter()
-            .filter_map(|&col_idx| data_columns.iter().find(|d| d.index == col_idx).cloned())
+            .filter_map(|&col_idx| data_columns.iter().find(|d| *d.index() == col_idx).cloned())
             .take(64)
             .map(|d| {
                 KzgVerifiedCustodyDataColumn::from_asserted_custody(
diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs
index 7bb139756d9..3fad5dd3973 100644
--- a/beacon_node/beacon_chain/src/data_column_verification.rs
+++ b/beacon_node/beacon_chain/src/data_column_verification.rs
@@ -2,14 +2,14 @@ use crate::block_verification::{
     BlockSlashInfo, get_validator_pubkey_cache, process_block_slash_info,
 };
 use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns};
-use crate::observed_data_sidecars::{ObservationStrategy, Observe};
+use crate::observed_data_sidecars::{Error as ObservedDataSidecarsError, ObservationStrategy, Observe};
 use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics};
 use educe::Educe;
 use fork_choice::ProtoBlock;
 use kzg::{Error as KzgError, Kzg};
 use proto_array::Block;
 use slot_clock::SlotClock;
-use ssz_derive::{Decode, Encode};
+use ssz_derive::Encode;
 use ssz_types::VariableList;
 use std::iter;
 use std::marker::PhantomData;
@@ -17,13 +17,14 @@
 use std::sync::Arc;
 use tracing::{debug, instrument};
 use types::data::ColumnIndex;
 use types::{
-    BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256,
-    SignedBeaconBlockHeader, Slot,
+    BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId,
+    EthSpec, Hash256, Slot,
 };

 /// An error occurred while validating a gossip data column.
 #[derive(Debug)]
 pub enum GossipDataColumnError {
+    /// An unexpected data column sidecar variant was received for this code path.
+    InvalidVariant,
     /// There was an error whilst processing the data column. It is not known if it is
     /// valid or invalid.
     ///
@@ -64,7 +65,10 @@
     /// ## Peer scoring
     ///
     /// The column is invalid or the peer is faulty.
-    InvalidSubnetId { received: u64, expected: u64 },
+    InvalidSubnetId {
+        received: u64,
+        expected: u64,
+    },
     /// The column sidecar is from a slot that is later than the current slot (with respect to the
     /// gossip clock disparity).
     ///
@@ -97,20 +101,27 @@
     /// ## Peer scoring
     ///
     /// The column is invalid and the peer is faulty.
-    ProposerIndexMismatch { sidecar: usize, local: usize },
+    ProposerIndexMismatch {
+        sidecar: usize,
+        local: usize,
+    },
     /// The provided columns's parent block is unknown.
     ///
     /// ## Peer scoring
     ///
     /// We cannot process the columns without validating its parent, the peer isn't necessarily faulty.
-    ParentUnknown { parent_root: Hash256 },
+    ParentUnknown {
+        parent_root: Hash256,
+    },
     /// The column conflicts with finalization, no need to propagate.
     ///
     /// ## Peer scoring
     ///
     /// It's unclear if this column is valid, but it conflicts with finality and shouldn't be
     /// imported.
-    NotFinalizedDescendant { block_parent_root: Hash256 },
+    NotFinalizedDescendant {
+        block_parent_root: Hash256,
+    },
     /// Invalid kzg commitment inclusion proof
     ///
     /// ## Peer scoring
@@ -124,7 +135,6 @@ pub enum GossipDataColumnError {
     ///
     /// The peer isn't faulty, but we do not forward it over gossip.
     PriorKnown {
-        proposer: u64,
         slot: Slot,
         index: ColumnIndex,
     },
@@ -160,7 +170,10 @@ pub enum GossipDataColumnError {
     /// ## Peer scoring
     ///
     /// The column sidecar is invalid and the peer is faulty
-    InconsistentProofsLength { cells_len: usize, proofs_len: usize },
+    InconsistentProofsLength {
+        cells_len: usize,
+        proofs_len: usize,
+    },
     /// The number of KZG commitments exceeds the maximum number of blobs allowed for the fork. The
     /// sidecar is invalid.
     ///
@@ -209,17 +222,37 @@ impl<T: BeaconChainTypes, O: ObservationStrategy> GossipVerifiedDataColumn<T, O>
         subnet_id: DataColumnSubnetId,
         chain: &BeaconChain<T>,
     ) -> Result<Self, GossipDataColumnError> {
-        let header = column_sidecar.signed_block_header.clone();
-        // We only process slashing info if the gossip verification failed
-        // since we do not process the data column any further in that case.
-        validate_data_column_sidecar_for_gossip::<T, O>(column_sidecar, subnet_id, chain).map_err(
-            |e| {
-                process_block_slash_info::<_, GossipDataColumnError>(
+        match column_sidecar.as_ref() {
+            DataColumnSidecar::Fulu(c) => {
+                let header = c.signed_block_header.clone();
+                // We only process slashing info if the gossip verification failed
+                // since we do not process the data column any further in that case.
+                validate_data_column_sidecar_for_gossip_fulu::<T, O>(
+                    column_sidecar,
+                    subnet_id,
                     chain,
-                    BlockSlashInfo::from_early_error_data_column(header, e),
                 )
-            },
-        )
+                .map_err(|e| {
+                    process_block_slash_info::<_, GossipDataColumnError>(
+                        chain,
+                        BlockSlashInfo::from_early_error_data_column(header, e),
+                    )
+                })
+            }
+            // TODO(gloas) support gloas data column variant
+            DataColumnSidecar::Gloas(_) => Err(GossipDataColumnError::InvalidVariant),
+        }
+    }
+
+    /// Construct a `GossipVerifiedDataColumn` that is assumed to be valid.
+    ///
+    /// This should ONLY be used for testing.
+    pub fn __assumed_valid(column: Arc<DataColumnSidecar<T::EthSpec>>) -> Self {
+        Self {
+            block_root: column.block_root(),
+            data_column: KzgVerifiedDataColumn { data: column },
+            _phantom: PhantomData,
+        }
     }

     /// Create a `GossipVerifiedDataColumn` from `DataColumnSidecar` for block production ONLY.
@@ -283,11 +316,7 @@
     }

     pub fn index(&self) -> ColumnIndex {
-        self.data_column.data.index
-    }
-
-    pub fn signed_block_header(&self) -> SignedBeaconBlockHeader {
-        self.data_column.data.signed_block_header.clone()
+        *self.data_column.data.index()
     }

     pub fn into_inner(self) -> KzgVerifiedDataColumn<T::EthSpec> {
@@ -296,7 +325,7 @@
 }

 /// Wrapper over a `DataColumnSidecar` for which we have completed kzg verification.
-#[derive(Debug, Educe, Clone, Encode, Decode)]
+#[derive(Debug, Educe, Clone, Encode)]
 #[educe(PartialEq, Eq)]
 #[ssz(struct_behaviour = "transparent")]
 pub struct KzgVerifiedDataColumn<E: EthSpec> {
@@ -345,7 +374,7 @@ impl<E: EthSpec> KzgVerifiedDataColumn<E> {
     }

     pub fn index(&self) -> ColumnIndex {
-        self.data.index
+        *self.data.index()
     }
 }

@@ -353,7 +382,7 @@
 pub type CustodyDataColumnList<E> = VariableList<CustodyDataColumn<E>, <E as EthSpec>::NumberOfColumns>;

 /// Data column that we must custody
-#[derive(Debug, Educe, Clone, Encode, Decode)]
+#[derive(Debug, Educe, Clone, Encode)]
 #[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))]
 #[ssz(struct_behaviour = "transparent")]
 pub struct CustodyDataColumn<E: EthSpec> {
@@ -378,12 +407,12 @@ impl<E: EthSpec> CustodyDataColumn<E> {
         self.data.clone()
     }
     pub fn index(&self) -> u64 {
-        self.data.index
+        *self.data.index()
     }
 }

 /// Data column that we must custody and has completed kzg verification
-#[derive(Debug, Educe, Clone, Encode, Decode)]
+#[derive(Debug, Educe, Clone, Encode)]
 #[educe(PartialEq, Eq)]
 #[ssz(struct_behaviour = "transparent")]
 pub struct KzgVerifiedCustodyDataColumn<E: EthSpec> {
@@ -443,7 +472,7 @@ impl<E: EthSpec> KzgVerifiedCustodyDataColumn<E> {
         self.data.clone()
     }
     pub fn index(&self) -> ColumnIndex {
-        self.data.index
+        *self.data.index()
     }
 }

@@ -478,11 +507,15 @@
 where
 }

 #[instrument(skip_all, level = "debug")]
-pub fn validate_data_column_sidecar_for_gossip<T: BeaconChainTypes, O: ObservationStrategy>(
+pub fn validate_data_column_sidecar_for_gossip_fulu<T: BeaconChainTypes, O: ObservationStrategy>(
     data_column: Arc<DataColumnSidecar<T::EthSpec>>,
     subnet: DataColumnSubnetId,
     chain: &BeaconChain<T>,
 ) -> Result<GossipVerifiedDataColumn<T, O>, GossipDataColumnError> {
+    let DataColumnSidecar::Fulu(data_column_fulu) = data_column.as_ref() else {
+        return Err(GossipDataColumnError::InvalidVariant);
+    };
+
     let column_slot = data_column.slot();

     verify_data_column_sidecar(&data_column, &chain.spec)?;
     verify_index_matches_subnet(&data_column, subnet, &chain.spec)?;
@@ -506,10 +539,10 @@
     data_column: &DataColumnSidecar<E>,
     spec: &ChainSpec,
 ) -> Result<(), GossipDataColumnError> {
-    if data_column.index >= E::number_of_columns() as u64 {
-        return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index));
+    if *data_column.index() >= E::number_of_columns() as u64 {
+        return Err(GossipDataColumnError::InvalidColumnIndex(
+            *data_column.index(),
+        ));
     }
-    if data_column.kzg_commitments.is_empty() {
+    if data_column.kzg_commitments().is_empty() {
         return Err(GossipDataColumnError::UnexpectedDataColumn);
     }
-    let cells_len = data_column.column.len();
-    let commitments_len = data_column.kzg_commitments.len();
-    let proofs_len = data_column.kzg_proofs.len();
+    let cells_len = data_column.column().len();
+    let commitments_len = data_column.kzg_commitments().len();
+    let proofs_len = data_column.kzg_proofs().len();

     let max_blobs_per_block = spec.max_blobs_per_block(data_column.epoch()) as usize;
     if commitments_len > max_blobs_per_block {
@@ -585,20 +620,19 @@ fn verify_is_unknown_sidecar<T: BeaconChainTypes>(
     if chain
         .observed_column_sidecars
         .read()
-        .proposer_is_known(column_sidecar)
-        .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))?
+        .observation_key_is_known(column_sidecar)
+        .map_err(|e: ObservedDataSidecarsError| GossipDataColumnError::BeaconChainError(Box::new(e.into())))?
     {
         return Err(GossipDataColumnError::PriorKnown {
-            proposer: column_sidecar.block_proposer_index(),
             slot: column_sidecar.slot(),
-            index: column_sidecar.index,
+            index: *column_sidecar.index(),
         });
     }
     Ok(())
 }

 fn verify_column_inclusion_proof<E: EthSpec>(
-    data_column: &DataColumnSidecar<E>,
+    data_column: &DataColumnSidecarFulu<E>,
 ) -> Result<(), GossipDataColumnError> {
     let _timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION);
     if !data_column.verify_inclusion_proof() {
@@ -622,7 +656,7 @@ fn verify_slot_higher_than_parent(
 }

 fn verify_parent_block_and_finalized_descendant<T: BeaconChainTypes>(
-    data_column: Arc<DataColumnSidecar<T::EthSpec>>,
+    data_column: &DataColumnSidecarFulu<T::EthSpec>,
     chain: &BeaconChain<T>,
 ) -> Result<ProtoBlock, GossipDataColumnError> {
     let fork_choice = chain.canonical_head.fork_choice_read_lock();
@@ -646,7 +680,7 @@
 }

 fn verify_proposer_and_signature<T: BeaconChainTypes>(
-    data_column: &DataColumnSidecar<T::EthSpec>,
+    data_column: &DataColumnSidecarFulu<T::EthSpec>,
     parent_block: &ProtoBlock,
     chain: &BeaconChain<T>,
 ) -> Result<(), GossipDataColumnError> {
@@ -723,7 +757,7 @@ fn verify_index_matches_subnet<E: EthSpec>(
     data_column: &DataColumnSidecar<E>,
     subnet: DataColumnSubnetId,
     spec: &ChainSpec,
 ) -> Result<(), GossipDataColumnError> {
-    let expected_subnet = DataColumnSubnetId::from_column_index(data_column.index, spec);
+    let expected_subnet = DataColumnSubnetId::from_column_index(*data_column.index(), spec);
     if expected_subnet != subnet {
         return Err(GossipDataColumnError::InvalidSubnetId {
             received: subnet.into(),
@@ -787,12 +821,11 @@ pub fn observe_gossip_data_column<T: BeaconChainTypes>(
         .observed_column_sidecars
         .write()
         .observe_sidecar(data_column_sidecar)
-        .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))?
+        .map_err(|e: ObservedDataSidecarsError| GossipDataColumnError::BeaconChainError(Box::new(e.into())))?
     {
         return Err(GossipDataColumnError::PriorKnown {
-            proposer: data_column_sidecar.block_proposer_index(),
             slot: data_column_sidecar.slot(),
-            index: data_column_sidecar.index,
+            index: *data_column_sidecar.index(),
         });
     }
     Ok(())
@@ -801,7 +834,8 @@
 #[cfg(test)]
 mod test {
     use crate::data_column_verification::{
-        GossipDataColumnError, GossipVerifiedDataColumn, validate_data_column_sidecar_for_gossip,
+        GossipDataColumnError, GossipVerifiedDataColumn,
+        validate_data_column_sidecar_for_gossip_fulu,
     };
     use crate::observed_data_sidecars::Observe;
     use crate::test_utils::{
@@ -810,12 +844,15 @@
     use eth2::types::BlobsBundle;
     use execution_layer::test_utils::generate_blobs;
     use std::sync::Arc;
-    use types::{DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkName, MainnetEthSpec};
+    use types::{
+        DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, EthSpec, ForkName,
+        MainnetEthSpec,
+    };

     type E = MainnetEthSpec;

     #[tokio::test]
-    async fn test_validate_data_column_sidecar_for_gossip() {
+    async fn test_validate_data_column_sidecar_for_gossip_fulu() {
         // Setting up harness is slow, we initialise once and use it for all gossip validation tests.
         let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
         let harness = BeaconChainHarness::builder(E::default())
@@ -827,19 +864,19 @@
         harness.advance_slot();

         let verify_fn = |column_sidecar: DataColumnSidecar<E>| {
-            let col_index = column_sidecar.index;
-            validate_data_column_sidecar_for_gossip::<_, Observe>(
+            let col_index = *column_sidecar.index();
+            validate_data_column_sidecar_for_gossip_fulu::<_, Observe>(
                 column_sidecar.into(),
                 DataColumnSubnetId::from_column_index(col_index, &harness.spec),
                 &harness.chain,
             )
         };
-        empty_data_column_sidecars_fails_validation(&harness, &verify_fn).await;
+        empty_data_column_sidecars_fails_validation_fulu(&harness, &verify_fn).await;
         data_column_sidecar_commitments_exceed_max_blobs_per_block(&harness, &verify_fn).await;
     }

     #[tokio::test]
-    async fn test_new_for_block_publishing() {
+    async fn test_new_for_block_publishing_fulu() {
         // Setting up harness is slow, we initialise once and use it for all gossip validation tests.
         let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
         let harness = BeaconChainHarness::builder(E::default())
@@ -856,11 +893,11 @@
                 &harness.chain,
             )
         };
-        empty_data_column_sidecars_fails_validation(&harness, &verify_fn).await;
+        empty_data_column_sidecars_fails_validation_fulu(&harness, &verify_fn).await;
         data_column_sidecar_commitments_exceed_max_blobs_per_block(&harness, &verify_fn).await;
     }

-    async fn empty_data_column_sidecars_fails_validation(
+    async fn empty_data_column_sidecars_fails_validation_fulu(
         harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
         verify_fn: &impl Fn(
             DataColumnSidecar<E>,
         ) -> Result<GossipVerifiedDataColumn<EphemeralHarnessType<E>>, GossipDataColumnError>,
     ) {
@@ -873,7 +910,7 @@
             .await;

         let index = 0;
-        let column_sidecar = DataColumnSidecar::<E> {
+        let column_sidecar: DataColumnSidecar<E> = DataColumnSidecar::Fulu(DataColumnSidecarFulu {
             index,
             column: vec![].try_into().unwrap(),
             kzg_commitments: vec![].try_into().unwrap(),
@@ -884,7 +921,7 @@
                 .body()
                 .kzg_commitments_merkle_proof()
                 .unwrap(),
-        };
+        });

         let result = verify_fn(column_sidecar);
         assert!(matches!(
diff --git a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs
index 9526921da73..a5dc7d7f8ba 100644
--- a/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs
+++ b/beacon_node/beacon_chain/src/fetch_blobs/fetch_blobs_beacon_adapter.rs
@@ -1,5 +1,5 @@
 use crate::fetch_blobs::{EngineGetBlobsOutput, FetchEngineBlobError};
-use crate::observed_block_producers::ProposalKey;
+use crate::observed_data_sidecars::ObservationKey;
 use crate::{AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes};
 use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2};
 use kzg::Kzg;
@@ -67,27 +67,25 @@ impl<T: BeaconChainTypes> FetchBlobsBeaconAdapter<T> {
             .map_err(FetchEngineBlobError::RequestFailed)
     }

-    pub(crate) fn blobs_known_for_proposal(
+    pub(crate) fn blobs_known_for_observation_key(
         &self,
-        proposer: u64,
-        slot: Slot,
+        observation_key: ObservationKey,
     ) -> Option<HashSet<u64>> {
-        let proposer_key = ProposalKey::new(proposer, slot);
         self.chain
             .observed_blob_sidecars
             .read()
-            .known_for_proposal(&proposer_key)
+            .known_for_observation_key(&observation_key)
             .cloned()
     }

-    pub(crate) fn data_column_known_for_proposal(
+    pub(crate) fn data_column_known_for_observation_key(
         &self,
-        proposal_key: ProposalKey,
+        observation_key: ObservationKey,
     ) -> Option<HashSet<u64>> {
         self.chain
             .observed_column_sidecars
             .read()
-            .known_for_proposal(&proposal_key)
+            .known_for_observation_key(&observation_key)
             .cloned()
     }
diff --git a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs
index 6559f24d23d..22f72d4c16d 100644
--- a/beacon_node/beacon_chain/src/fetch_blobs/mod.rs
+++ b/beacon_node/beacon_chain/src/fetch_blobs/mod.rs
@@ -18,7 +18,7 @@ use crate::data_column_verification::{KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn};
 #[cfg_attr(test, double)]
 use crate::fetch_blobs::fetch_blobs_beacon_adapter::FetchBlobsBeaconAdapter;
 use crate::kzg_utils::blobs_to_data_column_sidecars;
-use crate::observed_block_producers::ProposalKey;
+use crate::observed_data_sidecars::ObservationKey;
 use crate::validator_monitor::timestamp_now;
 use crate::{
     AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError,
@@ -193,8 +193,12 @@ async fn fetch_and_process_blobs_v1(
         &kzg_commitments_proof,
     )?;

-    if let Some(observed_blobs) =
-        chain_adapter.blobs_known_for_proposal(block.message().proposer_index(), block.slot())
+    if let Some(observed_blobs) = ObservationKey::from_block_root::<T::EthSpec>(
+        block_root,
+        block.slot(),
+        chain_adapter.spec(),
+    )
+    .and_then(|key| chain_adapter.blobs_known_for_observation_key(key))
     {
         blob_sidecar_list.retain(|blob| !observed_blobs.contains(&blob.blob_index()));
         if blob_sidecar_list.is_empty() {
@@ -380,7 +384,7 @@ async fn compute_custody_columns_to_import(
             .map(|data_columns| {
                 data_columns
                     .into_iter()
-                    .filter(|col| custody_columns_indices.contains(&col.index))
+                    .filter(|col| custody_columns_indices.contains(col.index()))
                     .map(|col| {
                         KzgVerifiedCustodyDataColumn::from_asserted_custody(
                             KzgVerifiedDataColumn::from_execution_verified(col),
@@ -391,9 +395,12 @@
             .map_err(FetchEngineBlobError::DataColumnSidecarError)?;

         // Only consider columns that are not already observed on gossip.
-        if let Some(observed_columns) = chain_adapter_cloned.data_column_known_for_proposal(
-            ProposalKey::new(block.message().proposer_index(), block.slot()),
-        ) {
+        if let Some(observed_columns) =
+            ObservationKey::from_block_root::<T::EthSpec>(block_root, block.slot(), &spec)
+                .and_then(|key| {
+                    chain_adapter_cloned.data_column_known_for_observation_key(key)
+                })
+        {
             custody_columns.retain(|col| !observed_columns.contains(&col.index()));
             if custody_columns.is_empty() {
                 return Ok(vec![]);
diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs
index cbe2f78fbda..b3deffa4d74 100644
--- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs
+++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs
@@ -156,7 +156,7 @@ mod get_blobs_v2 {
         mock_fork_choice_contains_block(&mut mock_adapter, vec![]);
         // All data columns already seen on gossip
         mock_adapter
-            .expect_data_column_known_for_proposal()
+            .expect_data_column_known_for_observation_key()
             .returning(|_| Some(hashset![0, 1, 2]));
         // No blobs should be processed
         mock_adapter.expect_process_engine_blobs().times(0);
@@ -193,7 +193,7 @@ mod get_blobs_v2 {
         mock_get_blobs_v2_response(&mut mock_adapter, Some(blobs_and_proofs));
         mock_fork_choice_contains_block(&mut mock_adapter, vec![]);
         mock_adapter
-            .expect_data_column_known_for_proposal()
+            .expect_data_column_known_for_observation_key()
             .returning(|_| None);
         mock_adapter
             .expect_cached_data_column_indexes()
@@ -332,8 +332,8 @@ mod get_blobs_v1 {
             .expect_cached_blob_indexes()
             .returning(|_| None);
         mock_adapter
-            .expect_blobs_known_for_proposal()
-            .returning(|_, _| None);
+            .expect_blobs_known_for_observation_key()
+            .returning(|_| None);
         // Returned blobs should be processed
         mock_process_engine_blobs_result(
             &mut mock_adapter,
@@ -427,8 +427,8 @@ mod get_blobs_v1 {
             .expect_cached_blob_indexes()
             .returning(|_| None);
         mock_adapter
-            .expect_blobs_known_for_proposal()
-            .returning(move |_, _| Some(all_blob_indices.clone()));
+            .expect_blobs_known_for_observation_key()
+            .returning(move |_| Some(all_blob_indices.clone()));

         // **WHEN**: Trigger `fetch_blobs` on the block
         let custody_columns: [ColumnIndex; 3] = [0, 1, 2];
@@ -467,8 +467,8 @@ mod get_blobs_v1 {
             .expect_cached_blob_indexes()
             .returning(|_| None);
         mock_adapter
-            .expect_blobs_known_for_proposal()
-            .returning(|_, _| None);
+            .expect_blobs_known_for_observation_key()
+            .returning(|_| None);
         mock_process_engine_blobs_result(
             &mut mock_adapter,
             Ok(AvailabilityProcessingStatus::Imported(block_root)),
diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs
index 6cf947adcb1..d6977d99852 100644
--- a/beacon_node/beacon_chain/src/historical_data_columns.rs
+++ b/beacon_node/beacon_chain/src/historical_data_columns.rs
@@ -61,12 +61,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let unique_column_indices = historical_data_column_sidecar_list
             .iter()
-            .map(|item| item.index)
+            .map(|item| *item.index())
             .collect::<HashSet<_>>();

         let mut slot_and_column_index_to_data_columns = historical_data_column_sidecar_list
             .iter()
-            .map(|data_column| ((data_column.slot(), data_column.index), data_column))
+            .map(|data_column| ((data_column.slot(), *data_column.index()), data_column))
             .collect::<HashMap<_, _>>();

         let forward_blocks_iter = self
@@ -80,13 +80,14 @@
             let (block_root, slot) = block_iter_result
                 .map_err(|e| HistoricalDataColumnError::BeaconChainError(Box::new(e)))?;

+            let fork_name = self.spec.fork_name_at_slot::<T::EthSpec>(slot);
             for column_index in unique_column_indices.clone() {
                 if let Some(data_column) =
                     slot_and_column_index_to_data_columns.remove(&(slot, column_index))
                 {
                     if self
                         .store
-                        .get_data_column(&block_root, &data_column.index)?
+                        .get_data_column(&block_root, data_column.index(), fork_name)?
                         .is_some()
                     {
                         continue;
diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs
index a1c255e3b3c..44269b84d98 100644
--- a/beacon_node/beacon_chain/src/kzg_utils.rs
+++ b/beacon_node/beacon_chain/src/kzg_utils.rs
@@ -6,12 +6,13 @@
 use rayon::prelude::*;
 use ssz_types::{FixedVector, VariableList};
 use std::sync::Arc;
 use tracing::instrument;
+use tree_hash::TreeHash;
 use types::data::{Cell, DataColumn, DataColumnSidecarError};
 use types::kzg_ext::KzgCommitments;
 use types::{
-    Blob, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarList,
-    EthSpec, Hash256, KzgCommitment, KzgProof, SignedBeaconBlock, SignedBeaconBlockHeader,
-    SignedBlindedBeaconBlock,
+    Blob, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu,
+    DataColumnSidecarGloas, DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof,
+    SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, Slot,
 };

 /// Converts a blob ssz List object to an array to be used with the kzg
@@ -59,22 +60,22 @@ where
     let mut commitments = Vec::new();

     for data_column in data_column_iter {
-        let col_index = data_column.index;
+        let col_index = *data_column.index();

-        if data_column.column.is_empty() {
+        if data_column.column().is_empty() {
             return Err((Some(col_index), KzgError::KzgVerificationFailed));
         }

-        for cell in &data_column.column {
+        for cell in data_column.column() {
             cells.push(ssz_cell_to_crypto_cell::<E>(cell).map_err(|e| (Some(col_index), e))?);
             column_indices.push(col_index);
         }

-        for &proof in &data_column.kzg_proofs {
+        for &proof in data_column.kzg_proofs() {
             proofs.push(Bytes48::from(proof));
         }

-        for &commitment in &data_column.kzg_commitments {
+        for &commitment in data_column.kzg_commitments() {
             commitments.push(Bytes48::from(commitment));
         }

@@ -171,7 +172,6 @@ pub fn blobs_to_data_column_sidecars(
         .body()
         .blob_kzg_commitments()
         .map_err(|_err| DataColumnSidecarError::PreDeneb)?;
-    let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?;
     let signed_block_header = block.signed_block_header();

     if cell_proofs.len() != blobs.len() * E::number_of_columns() {
@@ -207,14 +207,27 @@
         })
         .collect::<Result<Vec<_>, KzgError>>()?;

-    build_data_column_sidecars(
-        kzg_commitments.clone(),
-        kzg_commitments_inclusion_proof,
-        signed_block_header,
-        blob_cells_and_proofs_vec,
-        spec,
-    )
-    .map_err(DataColumnSidecarError::BuildSidecarFailed)
+    if block.fork_name_unchecked().gloas_enabled() {
+        build_data_column_sidecars_gloas(
+            kzg_commitments.clone(),
+            signed_block_header.message.tree_hash_root(),
+            block.slot(),
+            blob_cells_and_proofs_vec,
+            spec,
+        )
+        .map_err(DataColumnSidecarError::BuildSidecarFailed)
+    } else {
+        let kzg_commitments_inclusion_proof =
+            block.message().body().kzg_commitments_merkle_proof()?;
+        build_data_column_sidecars_fulu(
+            kzg_commitments.clone(),
+            kzg_commitments_inclusion_proof,
+            signed_block_header,
+            blob_cells_and_proofs_vec,
+            spec,
+        )
+        .map_err(DataColumnSidecarError::BuildSidecarFailed)
+    }
 }

 pub fn compute_cells<E: EthSpec>(blobs: &[&Blob<E>], kzg: &Kzg) -> Result<Vec<CellsAndKzgProofs>, KzgError> {
@@ -235,13 +248,20 @@
-pub(crate) fn build_data_column_sidecars<E: EthSpec>(
+pub(crate) fn build_data_column_sidecars_fulu<E: EthSpec>(
     kzg_commitments: KzgCommitments<E>,
     kzg_commitments_inclusion_proof: FixedVector<Hash256, E::KzgCommitmentsInclusionProofDepth>,
     signed_block_header: SignedBeaconBlockHeader,
     blob_cells_and_proofs_vec: Vec<CellsAndKzgProofs>,
     spec: &ChainSpec,
 ) -> Result<DataColumnSidecarList<E>, String> {
+    if spec
+        .fork_name_at_slot::<E>(signed_block_header.message.slot)
+        .gloas_enabled()
+    {
+        return Err("Attempting to construct Fulu data columns post-Gloas".to_owned());
+    }
+
     let number_of_columns = E::number_of_columns();
     let max_blobs_per_block = spec
         .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch()))
@@ -283,7 +303,7 @@
         .enumerate()
         .map(
             |(index, (col, proofs))| -> Result<Arc<DataColumnSidecar<E>>, String> {
-                Ok(Arc::new(DataColumnSidecar {
+                Ok(Arc::new(DataColumnSidecar::Fulu(DataColumnSidecarFulu {
                     index: index as u64,
                     column: DataColumn::<E>::try_from(col)
                         .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?,
@@ -292,7 +312,7 @@
                         .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?,
                     signed_block_header: signed_block_header.clone(),
                     kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(),
-                }))
+                })))
             },
         )
         .collect();
@@ -300,6 +320,75 @@
     sidecars
 }

+pub(crate) fn build_data_column_sidecars_gloas<E: EthSpec>(
+    kzg_commitments: KzgCommitments<E>,
+    beacon_block_root: Hash256,
+    slot: Slot,
+    blob_cells_and_proofs_vec: Vec<CellsAndKzgProofs>,
+    spec: &ChainSpec,
+) -> Result<DataColumnSidecarList<E>, String> {
+    if !spec.fork_name_at_slot::<E>(slot).gloas_enabled() {
+        return Err("Attempting to construct Gloas data columns pre-Gloas".to_owned());
+    }
+
+    let number_of_columns = E::number_of_columns();
+    let max_blobs_per_block = spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize;
+    let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns];
+    let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns];
+
+    for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec {
+        // We iterate over each column and construct it from "top to bottom",
+        // pushing on the cell and the corresponding proof at each column index. We do this for
+        // each blob (i.e. the outer loop).
+        for col in 0..number_of_columns {
+            let cell = blob_cells
+                .get(col)
+                .ok_or(format!("Missing blob cell at index {col}"))?;
+            let cell: Vec<u8> = cell.to_vec();
+            let cell =
+                Cell::<E>::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?;
+
+            let proof = blob_cell_proofs
+                .get(col)
+                .ok_or(format!("Missing blob cell KZG proof at index {col}"))?;
+
+            let column = columns
+                .get_mut(col)
+                .ok_or(format!("Missing data column at index {col}"))?;
+            let column_proofs = column_kzg_proofs
+                .get_mut(col)
+                .ok_or(format!("Missing data column proofs at index {col}"))?;
+
+            column.push(cell);
+            column_proofs.push(*proof);
+        }
+    }
+
+    let sidecars: Result<Vec<Arc<DataColumnSidecar<E>>>, String> = columns
+        .into_iter()
+        .zip(column_kzg_proofs)
+        .enumerate()
+        .map(
+            |(index, (col, proofs))| -> Result<Arc<DataColumnSidecar<E>>, String> {
+                Ok(Arc::new(DataColumnSidecar::Gloas(DataColumnSidecarGloas {
+                    index: index as u64,
+                    column: DataColumn::<E>::try_from(col)
+                        .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?,
+                    kzg_commitments: kzg_commitments.clone(),
+                    kzg_proofs: VariableList::try_from(proofs)
+                        .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?,
+                    beacon_block_root,
+                    slot,
+                })))
+            },
+        )
+        .collect();
+
+    sidecars
+}
+
+// TODO(gloas) blob reconstruction will fail post-Gloas. We should just return `Blob`s
+// instead of a `BlobSidecar`. This might require a beacon API spec change as well.
 /// Reconstruct blobs from a subset of data column sidecars (requires at least 50%).
 ///
 /// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated
@@ -314,7 +403,7 @@ pub fn reconstruct_blobs(
     spec: &ChainSpec,
 ) -> Result<BlobSidecarList<E>, String> {
     // Sort data columns by index to ensure ascending order for KZG operations
-    data_columns.sort_unstable_by_key(|dc| dc.index);
+    data_columns.sort_unstable_by_key(|dc| *dc.index());

     let first_data_column = data_columns
         .first()
@@ -323,7 +412,7 @@
     let blob_indices: Vec<usize> = match blob_indices_opt {
         Some(indices) => indices.into_iter().map(|i| i as usize).collect(),
         None => {
-            let num_of_blobs = first_data_column.kzg_commitments.len();
+            let num_of_blobs = first_data_column.kzg_commitments().len();
             (0..num_of_blobs).collect()
         }
     };
@@ -335,7 +424,7 @@
             let mut cell_ids: Vec<u64> = vec![];
             for data_column in &data_columns {
                 let cell = data_column
-                    .column
+                    .column()
                     .get(row_index)
                     .ok_or(format!("Missing data column at row index {row_index}"))
                    .and_then(|cell| {
                        ssz_cell_to_crypto_cell::<E>(cell).map_err(|e| format!("{e:?}"))
                    })?;

                 cells.push(cell);
-                cell_ids.push(data_column.index);
+                cell_ids.push(*data_column.index());
             }

             let num_cells_original_blob = E::number_of_columns() / 2;
@@ -374,8 +463,13 @@
                 row_index,
                 blob,
                 signed_block,
-                first_data_column.signed_block_header.clone(),
-                &first_data_column.kzg_commitments_inclusion_proof,
+                first_data_column
+                    .signed_block_header()
+                    .map_err(|e| format!("{e:?}"))?
+                    .clone(),
+                first_data_column
+                    .kzg_commitments_inclusion_proof()
+                    .map_err(|e| format!("{e:?}"))?,
                 kzg_proof,
             )
             .map(Arc::new)
@@ -395,7 +489,7 @@ pub fn reconstruct_data_columns(
     spec: &ChainSpec,
 ) -> Result<DataColumnSidecarList<E>, KzgError> {
     // Sort data columns by index to ensure ascending order for KZG operations
-    data_columns.sort_unstable_by_key(|dc| dc.index);
+    data_columns.sort_unstable_by_key(|dc| *dc.index());

     let first_data_column = data_columns
         .first()
@@ -403,37 +497,47 @@
         "data_columns should have at least one element".to_string(),
     ))?;

-    let num_of_blobs = first_data_column.kzg_commitments.len();
-
-    let blob_cells_and_proofs_vec =
-        (0..num_of_blobs)
-            .into_par_iter()
-            .map(|row_index| {
-                let mut cells: Vec<Cell> = vec![];
-                let mut cell_ids: Vec<u64> = vec![];
-                for data_column in &data_columns {
-                    let cell = data_column.column.get(row_index).ok_or(
-                        KzgError::InconsistentArrayLength(format!(
-                            "Missing data column at row index {row_index}"
-                        )),
-                    )?;
-
-                    cells.push(ssz_cell_to_crypto_cell::<E>(cell)?);
-                    cell_ids.push(data_column.index);
-                }
-                kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells)
-            })
-            .collect::<Result<Vec<_>, KzgError>>()?;
-
-    // Clone sidecar elements from existing data column, no need to re-compute
-    build_data_column_sidecars(
-        first_data_column.kzg_commitments.clone(),
-        first_data_column.kzg_commitments_inclusion_proof.clone(),
-        first_data_column.signed_block_header.clone(),
-        blob_cells_and_proofs_vec,
-        spec,
-    )
-    .map_err(KzgError::ReconstructFailed)
+    let num_of_blobs = first_data_column.kzg_commitments().len();
+
+    let blob_cells_and_proofs_vec = (0..num_of_blobs)
+        .into_par_iter()
+        .map(|row_index| {
+            let mut cells: Vec<Cell> = vec![];
+            let mut cell_ids: Vec<u64> = vec![];
+            for data_column in &data_columns {
+                let cell = data_column.column().get(row_index).ok_or(
+                    KzgError::InconsistentArrayLength(format!(
+                        "Missing data column at row index {row_index}"
+                    )),
+                )?;
+
+                cells.push(ssz_cell_to_crypto_cell::<E>(cell)?);
+                cell_ids.push(*data_column.index());
+            }
+            kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells)
+        })
+        .collect::<Result<Vec<_>, KzgError>>()?;
+
+    match first_data_column.as_ref() {
+        DataColumnSidecar::Fulu(first_column) => {
+            // Clone sidecar elements from existing data column, no need to re-compute
+            build_data_column_sidecars_fulu(
+                first_column.kzg_commitments.clone(),
+                first_column.kzg_commitments_inclusion_proof.clone(),
+                first_column.signed_block_header.clone(),
+                blob_cells_and_proofs_vec,
+                spec,
+            )
+            .map_err(KzgError::ReconstructFailed)
+        }
+        DataColumnSidecar::Gloas(first_column) => build_data_column_sidecars_gloas(
+            first_column.kzg_commitments.clone(),
+            first_column.beacon_block_root,
+            first_column.slot,
+            blob_cells_and_proofs_vec,
+            spec,
+        )
+        .map_err(KzgError::ReconstructFailed),
+    }
 }

 #[cfg(test)]
@@ -460,7 +564,7 @@ mod test {
         let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
         let kzg = get_kzg();
         test_build_data_columns_empty(&kzg, &spec);
-        test_build_data_columns(&kzg, &spec);
+        test_build_data_columns_fulu(&kzg, &spec);
         test_reconstruct_data_columns(&kzg, &spec);
         test_reconstruct_data_columns_unordered(&kzg, &spec);
         test_reconstruct_blobs_from_data_columns(&kzg, &spec);
@@ -494,8 +598,10 @@
         assert!(column_sidecars.is_empty());
     }

+    // TODO(gloas) create `test_build_data_columns_gloas` and make sure it's called
+    // in the relevant places
     #[track_caller]
-    fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) {
+    fn test_build_data_columns_fulu(kzg: &Kzg, spec: &ChainSpec) {
         // Using at least 2 blobs to make sure we're arranging the data columns correctly.
         let num_of_blobs = 2;
         let (signed_block, blobs, proofs) =
@@ -520,18 +626,21 @@
         assert_eq!(column_sidecars.len(), E::number_of_columns());
         for (idx, col_sidecar) in column_sidecars.iter().enumerate() {
-            assert_eq!(col_sidecar.index, idx as u64);
+            assert_eq!(*col_sidecar.index(), idx as u64);

-            assert_eq!(col_sidecar.kzg_commitments.len(), num_of_blobs);
-            assert_eq!(col_sidecar.column.len(), num_of_blobs);
-            assert_eq!(col_sidecar.kzg_proofs.len(), num_of_blobs);
+            assert_eq!(col_sidecar.kzg_commitments().len(), num_of_blobs);
+            assert_eq!(col_sidecar.column().len(), num_of_blobs);
+            assert_eq!(col_sidecar.kzg_proofs().len(), num_of_blobs);

-            assert_eq!(col_sidecar.kzg_commitments, block_kzg_commitments);
+            assert_eq!(col_sidecar.kzg_commitments().clone(), block_kzg_commitments);
             assert_eq!(
-                col_sidecar.kzg_commitments_inclusion_proof,
+                col_sidecar
+                    .kzg_commitments_inclusion_proof()
+                    .unwrap()
+                    .clone(),
                 block_kzg_commitments_inclusion_proof
             );
-            assert!(col_sidecar.verify_inclusion_proof());
+            assert!(col_sidecar.as_fulu().unwrap().verify_inclusion_proof());
         }
     }
diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs
index 46a3678f167..b04a0052825 100644
--- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs
+++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs
@@ -3,11 +3,13 @@
 //! Only `BlobSidecar`s that have completed proposer signature verification can be added
 //! to this cache to reduce DoS risks.
-use crate::observed_block_producers::ProposalKey;
 use std::collections::{HashMap, HashSet};
 use std::marker::PhantomData;
 use std::sync::Arc;
-use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Slot};
+use types::{BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, Hash256, Slot};
+
+type ValidatorIndex = u64;
+type BeaconBlockRoot = Hash256;

 #[derive(Debug, PartialEq)]
 pub enum Error {
@@ -18,12 +20,16 @@ pub enum Error {
     /// Note: The invalid data should have been caught and flagged as an error much before reaching
     /// here.
     InvalidDataIndex(u64),
+
+    /// An unexpected data sidecar variant was received.
+    UnexpectedVariant,
 }

 pub trait ObservableDataSidecar {
     fn slot(&self) -> Slot;
-    fn block_proposer_index(&self) -> u64;
     fn index(&self) -> u64;
+    fn proposer_index(&self) -> Option<ValidatorIndex>;
+    fn beacon_block_root(&self) -> BeaconBlockRoot;
     fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize;
 }

@@ -32,14 +38,18 @@ impl<E: EthSpec> ObservableDataSidecar for BlobSidecar<E> {
         self.slot()
     }

-    fn block_proposer_index(&self) -> u64 {
-        self.block_proposer_index()
-    }
-
     fn index(&self) -> u64 {
         self.index
     }

+    fn proposer_index(&self) -> Option<ValidatorIndex> {
+        Some(self.block_proposer_index())
+    }
+
+    fn beacon_block_root(&self) -> BeaconBlockRoot {
+        self.block_root()
+    }
+
     fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize {
         spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize
     }

@@ -50,12 +60,16 @@ impl<E: EthSpec> ObservableDataSidecar for DataColumnSidecar<E> {
         self.slot()
     }

-    fn block_proposer_index(&self) -> u64 {
-        self.block_proposer_index()
+    fn index(&self) -> u64 {
+        *self.index()
     }

-    fn index(&self) -> u64 {
-        self.index
+    fn proposer_index(&self) -> Option<ValidatorIndex> {
+        self.as_fulu().map(|d| d.block_proposer_index()).ok()
+    }
+
+    fn beacon_block_root(&self) -> BeaconBlockRoot {
+        self.block_root()
     }

     fn max_num_of_items(_spec: &ChainSpec, _slot: Slot) -> usize {
@@ -63,6 +77,58 @@
 }

+#[derive(Hash, PartialEq, Eq)]
+pub enum ObservationKey {
+    ProposerKey((ValidatorIndex, Slot)),
+    BlockRootKey((BeaconBlockRoot, Slot)),
+}
+
+impl ObservationKey {
+    pub fn new<T: ObservableDataSidecar, E: EthSpec>(sidecar: &T, spec: &ChainSpec) -> Result<Self, Error> {
+        let slot = sidecar.slot();
+
+        if spec.fork_name_at_slot::<E>(slot).gloas_enabled() {
+            Ok(Self::new_block_root_key(sidecar.beacon_block_root(), slot))
+        } else {
+            Self::new_proposer_key(sidecar.proposer_index(), slot)
+        }
+    }
+
+    /// Create an observation key for lookup when we only have block_root and slot.
+    /// This is only valid for gloas and later forks where observations are keyed by block_root.
+    /// Returns `None` if the slot is pre-gloas (where observations are keyed by proposer_index).
+    pub fn from_block_root<E: EthSpec>(
+        block_root: BeaconBlockRoot,
+        slot: Slot,
+        spec: &ChainSpec,
+    ) -> Option<Self> {
+        if spec.fork_name_at_slot::<E>(slot).gloas_enabled() {
+            Some(Self::new_block_root_key(block_root, slot))
+        } else {
+            None
+        }
+    }
+
+    fn new_proposer_key(proposer_index: Option<ValidatorIndex>, slot: Slot) -> Result<Self, Error> {
+        if let Some(proposer_index) = proposer_index {
+            Ok(Self::ProposerKey((proposer_index, slot)))
+        } else {
+            Err(Error::UnexpectedVariant)
+        }
+    }
+
+    fn new_block_root_key(beacon_block_root: BeaconBlockRoot, slot: Slot) -> Self {
+        Self::BlockRootKey((beacon_block_root, slot))
+    }
+
+    pub fn slot(&self) -> Slot {
+        match self {
+            ObservationKey::ProposerKey((_, slot)) => *slot,
+            ObservationKey::BlockRootKey((_, slot)) => *slot,
+        }
+    }
+}

 /// Maintains a cache of seen `ObservableDataSidecar`s that are received over gossip
 /// and have been gossip verified.
@@ -71,15 +137,15 @@
 ///
 /// Note: To prevent DoS attacks, this cache must include only items that have received some DoS resistance
 /// like checking the proposer signature.
-pub struct ObservedDataSidecars<T: ObservableDataSidecar> {
+pub struct ObservedDataSidecars<T: ObservableDataSidecar, E: EthSpec> {
     finalized_slot: Slot,
-    /// Stores all received data indices for a given `(ValidatorIndex, Slot)` tuple.
-    items: HashMap<ProposalKey, HashSet<u64>>,
+    /// Stores all received data indices for a given `ObservationKey`.
+    items: HashMap<ObservationKey, HashSet<u64>>,
     spec: Arc<ChainSpec>,
-    _phantom: PhantomData<T>,
+    _phantom: PhantomData<(T, E)>,
 }

-impl<T: ObservableDataSidecar> ObservedDataSidecars<T> {
+impl<T: ObservableDataSidecar, E: EthSpec> ObservedDataSidecars<T, E> {
     /// Instantiates `Self` with `finalized_slot == 0`.
     pub fn new(spec: Arc<ChainSpec>) -> Self {
         Self {
@@ -97,12 +163,11 @@
     pub fn observe_sidecar(&mut self, data_sidecar: &T) -> Result<bool, Error> {
         self.sanitize_data_sidecar(data_sidecar)?;

+        let observation_key = ObservationKey::new::<T, E>(data_sidecar, &self.spec)?;
+
         let data_indices = self
             .items
-            .entry(ProposalKey {
-                slot: data_sidecar.slot(),
-                proposer: data_sidecar.block_proposer_index(),
-            })
+            .entry(observation_key)
             .or_insert_with(|| {
                 HashSet::with_capacity(T::max_num_of_items(&self.spec, data_sidecar.slot()))
             });
@@ -112,20 +177,23 @@
     }

     /// Returns `true` if the `data_sidecar` has already been observed in the cache within the prune window.
-    pub fn proposer_is_known(&self, data_sidecar: &T) -> Result<bool, Error> {
+    pub fn observation_key_is_known(&self, data_sidecar: &T) -> Result<bool, Error> {
         self.sanitize_data_sidecar(data_sidecar)?;
+
+        let observation_key = ObservationKey::new::<T, E>(data_sidecar, &self.spec)?;
+
         let is_known = self
             .items
-            .get(&ProposalKey {
-                slot: data_sidecar.slot(),
-                proposer: data_sidecar.block_proposer_index(),
-            })
+            .get(&observation_key)
             .is_some_and(|indices| indices.contains(&data_sidecar.index()));
         Ok(is_known)
     }

-    pub fn known_for_proposal(&self, proposal_key: &ProposalKey) -> Option<&HashSet<u64>> {
-        self.items.get(proposal_key)
+    pub fn known_for_observation_key(
+        &self,
+        observation_key: &ObservationKey,
+    ) -> Option<&HashSet<u64>> {
+        self.items.get(observation_key)
     }

     fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> {
@@ -150,7 +218,7 @@
         }

         self.finalized_slot = finalized_slot;
-        self.items.retain(|k, _| k.slot > finalized_slot);
+        self.items.retain(|k, _| k.slot() > finalized_slot);
     }
 }

@@ -184,7 +252,6 @@ impl ObservationStrategy for DoNotObserve {
 mod tests {
     use super::*;
     use crate::test_utils::test_spec;
-    use bls::Hash256;
     use std::sync::Arc;
     use types::{Epoch, MainnetEthSpec};
@@ -201,7 +268,7 @@
     #[test]
     fn pruning() {
         let spec = Arc::new(test_spec::<E>());
-        let mut cache = ObservedDataSidecars::<BlobSidecar<E>>::new(spec);
+        let mut cache = ObservedDataSidecars::<BlobSidecar<E>, E>::new(spec.clone());

         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
         assert_eq!(cache.items.len(), 0, "no slots should be present");

         let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);

         assert_eq!(
-            cache.observe_sidecar(&sidecar_a),
+            cache.observe_sidecar(sidecar_a.as_ref()),
             Ok(false),
             "can observe proposer, indicates proposer unobserved"
         );
@@ -227,9 +294,14 @@
             "only one (validator_index, slot) tuple should be present"
         );

+        let observation_key = &ObservationKey::new::<BlobSidecar<E>, E>(
+            sidecar_a.as_ref(),
+            &spec,
+        ).unwrap();
+
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
+            .get(observation_key)
             .expect("slot zero should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -243,11 +315,16 @@
         cache.prune(Slot::new(0));

+        let observation_key = ObservationKey::new::<BlobSidecar<E>, E>(
+            sidecar_a.as_ref(),
+            &spec,
+        ).unwrap();
+
         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
         assert_eq!(cache.items.len(), 1, "only one slot should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
+            .get(&observation_key)
             .expect("slot zero should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -275,7 +352,7 @@
         let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0);

         assert_eq!(
-            cache.observe_sidecar(&block_b),
+            cache.observe_sidecar(block_b.as_ref()),
             Err(Error::FinalizedDataSidecar {
                 slot: E::slots_per_epoch().into(),
                 finalized_slot: E::slots_per_epoch().into(),
@@ -296,15 +373,20 @@
         let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0);

         assert_eq!(
-            cache.observe_sidecar(&block_b),
+            cache.observe_sidecar(block_b.as_ref()),
             Ok(false),
             "can insert non-finalized block"
         );

+        let observation_key = ObservationKey::new::<BlobSidecar<E>, E>(
+            block_b.as_ref(),
+            &spec,
+        ).unwrap();
+
         assert_eq!(cache.items.len(), 1, "only one slot should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs)))
+            .get(&observation_key)
             .expect("the three epochs slot should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -325,10 +407,15 @@
             "finalized slot is updated"
         );

+        let observation_key = ObservationKey::new::<BlobSidecar<E>, E>(
+            block_b.as_ref(),
+            &spec,
+        ).unwrap();
+
         assert_eq!(cache.items.len(), 1, "only one slot should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs)))
+            .get(&observation_key)
             .expect("the three epochs slot should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -340,32 +427,32 @@
     #[test]
     fn simple_observations() {
         let spec = Arc::new(test_spec::<E>());
-        let mut cache = ObservedDataSidecars::<BlobSidecar<E>>::new(spec.clone());
+        let mut cache = ObservedDataSidecars::<BlobSidecar<E>, E>::new(spec.clone());

         // Slot 0, index 0
         let proposer_index_a = 420;
         let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);

         assert_eq!(
-            cache.proposer_is_known(&sidecar_a),
+            cache.observation_key_is_known(sidecar_a.as_ref()),
             Ok(false),
             "no observation in empty cache"
         );

         assert_eq!(
-            cache.observe_sidecar(&sidecar_a),
+            cache.observe_sidecar(sidecar_a.as_ref()),
             Ok(false),
             "can observe proposer, indicates proposer unobserved"
         );

         assert_eq!(
-            cache.proposer_is_known(&sidecar_a),
+            cache.observation_key_is_known(sidecar_a.as_ref()),
             Ok(true),
             "observed block is indicated as true"
         );

         assert_eq!(
-            cache.observe_sidecar(&sidecar_a),
+            cache.observe_sidecar(sidecar_a.as_ref()),
             Ok(true),
             "observing again indicates true"
         );
@@ -374,7 +461,7 @@
         assert_eq!(cache.items.len(), 1, "only one slot should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
+            .get(&ObservationKey::new::<BlobSidecar<E>, E>(sidecar_a.as_ref(), &spec).unwrap())
             .expect("slot zero should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -388,22 +475,22 @@
         let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0);

         assert_eq!(
-            cache.proposer_is_known(&sidecar_b),
+            cache.observation_key_is_known(sidecar_b.as_ref()),
             Ok(false),
             "no observation for new slot"
         );
         assert_eq!(
-            cache.observe_sidecar(&sidecar_b),
+            cache.observe_sidecar(sidecar_b.as_ref()),
             Ok(false),
             "can observe proposer for new slot, indicates proposer unobserved"
         );
         assert_eq!(
-            cache.proposer_is_known(&sidecar_b),
+            cache.observation_key_is_known(sidecar_b.as_ref()),
             Ok(true),
             "observed block in slot 1 is indicated as true"
         );
         assert_eq!(
-            cache.observe_sidecar(&sidecar_b),
+            cache.observe_sidecar(sidecar_b.as_ref()),
             Ok(true),
             "observing slot 1 again indicates true"
         );
@@ -412,7 +499,7 @@
         assert_eq!(cache.items.len(), 2, "two slots should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
+            .get(&ObservationKey::new::<BlobSidecar<E>, E>(sidecar_a.as_ref(), &spec).unwrap())
             .expect("slot zero should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -421,8 +508,8 @@
         );
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_b, Slot::new(1)))
-            .expect("slot zero should be present");
+            .get(&ObservationKey::new::<BlobSidecar<E>, E>(sidecar_b.as_ref(), &spec).unwrap())
+            .expect("slot one should be present");
         assert_eq!(
             cached_blob_indices.len(),
             1,
@@ -433,22 +520,22 @@
         let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1);

         assert_eq!(
-            cache.proposer_is_known(&sidecar_c),
+            cache.observation_key_is_known(sidecar_c.as_ref()),
             Ok(false),
             "no observation for new index"
         );
         assert_eq!(
-            cache.observe_sidecar(&sidecar_c),
+            cache.observe_sidecar(sidecar_c.as_ref()),
             Ok(false),
             "can observe new index, indicates sidecar unobserved for new index"
         );
         assert_eq!(
-            cache.proposer_is_known(&sidecar_c),
+            cache.observation_key_is_known(sidecar_c.as_ref()),
             Ok(true),
             "observed new sidecar is indicated as true"
         );
         assert_eq!(
-            cache.observe_sidecar(&sidecar_c),
+            cache.observe_sidecar(sidecar_c.as_ref()),
             Ok(true),
             "observing new sidecar again indicates true"
         );
@@ -457,7 +544,7 @@
         assert_eq!(cache.items.len(), 2, "two slots should be present");
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
+            .get(&ObservationKey::new::<BlobSidecar<E>, E>(sidecar_a.as_ref(), &spec).unwrap())
             .expect("slot zero should be present");
         assert_eq!(
             cached_blob_indices.len(),
@@ -465,41 +552,35 @@
             "two blob indices should be present in slot 0"
         );

-        // Create a sidecar sharing slot and proposer but with a different block root.
-        let mut sidecar_d: BlobSidecar<E> = BlobSidecar {
-            index: sidecar_c.index,
-            blob: sidecar_c.blob.clone(),
-            kzg_commitment: sidecar_c.kzg_commitment,
-            kzg_proof: sidecar_c.kzg_proof,
-            signed_block_header: sidecar_c.signed_block_header.clone(),
-            kzg_commitment_inclusion_proof: sidecar_c.kzg_commitment_inclusion_proof.clone(),
-        };
-        sidecar_d.signed_block_header.message.body_root = Hash256::repeat_byte(7);
+        // Create a sidecar with a different proposer index (pre-gloas: keyed by proposer,
+        // post-gloas: keyed by block root).
+        let proposer_index_c = 422;
+        let sidecar_d = get_blob_sidecar(0, proposer_index_c, 0);
         assert_eq!(
-            cache.proposer_is_known(&sidecar_d),
-            Ok(true),
-            "there has been an observation for this proposer index"
+            cache.observation_key_is_known(sidecar_d.as_ref()),
+            Ok(false),
+            "no observation for new proposer"
         );
         assert_eq!(
-            cache.observe_sidecar(&sidecar_d),
-            Ok(true),
-            "indicates sidecar proposer was observed"
+            cache.observe_sidecar(sidecar_d.as_ref()),
+            Ok(false),
+            "can observe sidecar, indicates sidecar unobserved for new proposer"
         );
         let cached_blob_indices = cache
             .items
-            .get(&ProposalKey::new(proposer_index_a, Slot::new(0)))
-            .expect("slot zero should be present");
+            .get(&ObservationKey::new::<BlobSidecar<E>, E>(sidecar_d.as_ref(), &spec).unwrap())
+            .expect("sidecar_d's observation key should be present");
         assert_eq!(
             cached_blob_indices.len(),
-            2,
-            "two blob indices should be present in slot 0"
+            1,
+            "one blob index should be present for sidecar_d's observation key"
         );

         // Try adding an out of bounds index
         let invalid_index = spec.max_blobs_per_block(Epoch::new(0));
         let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index);
         assert_eq!(
-            cache.observe_sidecar(&sidecar_d),
+            cache.observe_sidecar(sidecar_d.as_ref()),
             Err(Error::InvalidDataIndex(invalid_index)),
             "cannot add an index > MaxBlobsPerBlock"
         );
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index b6c235a4cb0..ba37adeebef 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -3,7 +3,7 @@ use crate::block_verification_types::{AsBlock, RpcBlock};
 use crate::custody_context::NodeCustodyType;
 use crate::data_column_verification::CustodyDataColumn;
 use crate::graffiti_calculator::GraffitiSettings;
-use crate::kzg_utils::build_data_column_sidecars;
+use crate::kzg_utils::{build_data_column_sidecars_fulu, build_data_column_sidecars_gloas};
 use crate::observed_operations::ObservationOutcome;
 pub use crate::persisted_beacon_chain::PersistedBeaconChain;
 use 
crate::{BeaconBlockResponseWrapper, get_block_root};
@@ -2441,7 +2441,12 @@ where
         // Blobs are stored as data columns from Fulu (PeerDAS)
         if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
-            let columns = self.chain.get_data_columns(&block_root).unwrap().unwrap();
+            let fork_name = self.spec.fork_name_at_epoch(block.epoch());
+            let columns = self
+                .chain
+                .get_data_columns(&block_root, fork_name)
+                .unwrap()
+                .unwrap();
             let custody_columns = columns
                 .into_iter()
                 .map(CustodyDataColumn::from_asserted_custody)
@@ -2470,7 +2475,7 @@ where
             // currently have any knowledge of the columns being custodied.
             let columns = generate_data_column_sidecars_from_block(&block, &self.spec)
                 .into_iter()
-                .filter(|d| sampling_columns.contains(&d.index))
+                .filter(|d| sampling_columns.contains(d.index()))
                 .map(CustodyDataColumn::from_asserted_custody)
                 .collect::<Vec<_>>();
             RpcBlock::new_with_custody_columns(Some(block_root), block, columns)?
@@ -3209,10 +3214,10 @@ where
         let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec)
             .into_iter()
-            .filter(|c| custody_columns.contains(&c.index))
+            .filter(|c| custody_columns.contains(c.index()))
             .map(|sidecar| {
                 let subnet_id =
-                    DataColumnSubnetId::from_column_index(sidecar.index, &self.spec);
+                    DataColumnSubnetId::from_column_index(*sidecar.index(), &self.spec);
                 self.chain
                     .verify_data_column_sidecar_for_gossip(sidecar, subnet_id)
             })
@@ -3363,39 +3368,76 @@ pub fn generate_data_column_sidecars_from_block(
         .unwrap();
     let signed_block_header = block.signed_block_header();

-    // load the precomputed column sidecar to avoid computing them for every block in the tests.
-    let template_data_columns = RuntimeVariableList::<DataColumnSidecar<E>>::from_ssz_bytes(
-        TEST_DATA_COLUMN_SIDECARS_SSZ,
-        E::number_of_columns(),
-    )
-    .unwrap();
+    // Load the precomputed column sidecars to avoid computing them for every block in the tests,
+    // then repeat the cells and proofs for every blob.
+    let blob_cells_and_proofs_vec = if block.fork_name_unchecked().gloas_enabled() {
+        let template_data_columns =
+            RuntimeVariableList::<DataColumnSidecarGloas<E>>::from_ssz_bytes(
+                TEST_DATA_COLUMN_SIDECARS_SSZ,
+                E::number_of_columns(),
+            )
+            .unwrap();

-    let (cells, proofs) = template_data_columns
-        .into_iter()
-        .map(|sidecar| {
-            let DataColumnSidecar {
-                column, kzg_proofs, ..
-            } = sidecar;
-            // There's only one cell per column for a single blob
-            let cell_bytes: Vec<u8> = column.into_iter().next().unwrap().into();
-            let kzg_cell = cell_bytes.try_into().unwrap();
-            let kzg_proof = kzg_proofs.into_iter().next().unwrap();
-            (kzg_cell, kzg_proof)
-        })
-        .collect::<(Vec<_>, Vec<_>)>();
+        let (cells, proofs) = template_data_columns
+            .into_iter()
+            .map(|sidecar| {
+                let DataColumnSidecarGloas {
+                    column, kzg_proofs, ..
+                } = sidecar;
+                // There's only one cell per column for a single blob
+                let cell_bytes: Vec<u8> = column.into_iter().next().unwrap().into();
+                let kzg_cell = cell_bytes.try_into().unwrap();
+                let kzg_proof = kzg_proofs.into_iter().next().unwrap();
+                (kzg_cell, kzg_proof)
+            })
+            .collect::<(Vec<_>, Vec<_>)>();

-    // Repeat the cells and proofs for every blob
-    let blob_cells_and_proofs_vec =
-        vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()];
+        vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()]
+    } else {
+        // Same as above, but decode the pre-gloas (Fulu) template type.
+ let template_data_columns = + RuntimeVariableList::>::from_ssz_bytes( + TEST_DATA_COLUMN_SIDECARS_SSZ, + E::number_of_columns(), + ) + .unwrap(); - build_data_column_sidecars( - kzg_commitments.clone(), - kzg_commitments_inclusion_proof, - signed_block_header, - blob_cells_and_proofs_vec, - spec, - ) - .unwrap() + let (cells, proofs) = template_data_columns + .into_iter() + .map(|sidecar| { + let DataColumnSidecarFulu { + column, kzg_proofs, .. + } = sidecar; + // There's only one cell per column for a single blob + let cell_bytes: Vec = column.into_iter().next().unwrap().into(); + let kzg_cell = cell_bytes.try_into().unwrap(); + let kzg_proof = kzg_proofs.into_iter().next().unwrap(); + (kzg_cell, kzg_proof) + }) + .collect::<(Vec<_>, Vec<_>)>(); + + vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()] + }; + + if block.fork_name_unchecked().gloas_enabled() { + build_data_column_sidecars_gloas( + kzg_commitments.clone(), + signed_block_header.message.tree_hash_root(), + signed_block_header.message.slot, + blob_cells_and_proofs_vec, + spec, + ) + .unwrap() + } else { + build_data_column_sidecars_fulu( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .unwrap() + } } pub fn generate_data_column_indices_rand_order() -> Vec { diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 2644b74b28e..771edf8f70b 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -81,10 +81,12 @@ async fn get_chain_segment() -> (Vec>, Vec( ) { for old_custody_column_sidecar in data_columns.as_mut_slice() { let old_column_sidecar = old_custody_column_sidecar.as_data_column(); - let new_column_sidecar = Arc::new(DataColumnSidecar:: { - index: old_column_sidecar.index, - column: old_column_sidecar.column.clone(), - kzg_commitments: old_column_sidecar.kzg_commitments.clone(), - kzg_proofs: old_column_sidecar.kzg_proofs.clone(), + let new_column_sidecar = Arc::new(DataColumnSidecar::Fulu(DataColumnSidecarFulu { + index: *old_column_sidecar.index(), + column: old_column_sidecar.column().clone(), + kzg_commitments: old_column_sidecar.kzg_commitments().clone(), + kzg_proofs: old_column_sidecar.kzg_proofs().clone(), signed_block_header: signed_block.signed_block_header(), kzg_commitments_inclusion_proof: signed_block .message() .body() .kzg_commitments_merkle_proof() .unwrap(), - }); + })); *old_custody_column_sidecar = CustodyDataColumn::from_asserted_custody(new_column_sidecar); } } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 35884541e19..315da0862c1 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -9,7 +9,10 @@ use rand::rngs::StdRng; use std::sync::Arc; use types::data::FixedBlobSidecarList; use types::test_utils::TestRandom; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, MinimalEthSpec, Slot}; +use types::{ + BlobSidecar, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSidecarGloas, EthSpec, + MinimalEthSpec, Slot, +}; type E = MinimalEthSpec; @@ -73,13 +76,22 @@ async fn data_column_sidecar_event_on_process_gossip_data_column() { // build and process a gossip verified data column let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let sidecar = { - // DA checker only accepts sampling columns, so we need to create one with a sampling 
index.
-        let mut random_sidecar = DataColumnSidecar::random_for_test(&mut rng);
         let slot = Slot::new(10);
-        let epoch = slot.epoch(E::slots_per_epoch());
-        random_sidecar.signed_block_header.message.slot = slot;
-        random_sidecar.index = harness.chain.sampling_columns_for_epoch(epoch)[0];
-        random_sidecar
+        let fork_name = harness.spec.fork_name_at_slot::<E>(slot);
+        // DA checker only accepts sampling columns, so we need to create one with a sampling index.
+        if fork_name.gloas_enabled() {
+            let mut random_sidecar = DataColumnSidecarGloas::random_for_test(&mut rng);
+            let epoch = slot.epoch(E::slots_per_epoch());
+            random_sidecar.slot = slot;
+            random_sidecar.index = harness.chain.sampling_columns_for_epoch(epoch)[0];
+            DataColumnSidecar::Gloas(random_sidecar)
+        } else {
+            let mut random_sidecar = DataColumnSidecarFulu::random_for_test(&mut rng);
+            let epoch = slot.epoch(E::slots_per_epoch());
+            random_sidecar.signed_block_header.message.slot = slot;
+            random_sidecar.index = harness.chain.sampling_columns_for_epoch(epoch)[0];
+            DataColumnSidecar::Fulu(random_sidecar)
+        }
     };
     let gossip_verified_data_column =
         GossipVerifiedDataColumn::__new_for_testing(Arc::new(sidecar));
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs
index 69e1346cfd6..598b79acc2e 100644
--- a/beacon_node/beacon_chain/tests/store_tests.rs
+++ b/beacon_node/beacon_chain/tests/store_tests.rs
@@ -3381,7 +3381,7 @@ async fn test_import_historical_data_columns_batch() {
         .await;
     harness.advance_slot();

-    let block_root_iter = harness
+    let block_root_and_slot_iter = harness
         .chain
         .forwards_iter_block_roots_until(start_slot, end_slot)
         .unwrap();
@@ -3389,9 +3389,14 @@ async fn test_import_historical_data_columns_batch() {
     let mut data_columns_list = vec![];

     // Get all data columns for epoch 0
-    for block in block_root_iter {
-        let (block_root, _) = block.unwrap();
-        let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
+    for block_root_and_slot in block_root_and_slot_iter {
+        let (block_root, slot) = block_root_and_slot.unwrap();
+        let fork_name = harness.spec.fork_name_at_slot::<E>(slot);
+        let data_columns = harness
+            .chain
+            .store
+            .get_data_columns(&block_root, fork_name)
+            .unwrap();
         for data_column in data_columns.unwrap_or_default() {
             data_columns_list.push(data_column);
         }
@@ -3416,15 +3421,20 @@ async fn test_import_historical_data_columns_batch() {
         .try_prune_blobs(true, Epoch::new(2))
         .unwrap();

-    let block_root_iter = harness
+    let block_root_and_slot_iter = harness
         .chain
         .forwards_iter_block_roots_until(start_slot, end_slot)
         .unwrap();

     // Assert that data columns no longer exist for epoch 0
-    for block in block_root_iter {
-        let (block_root, _) = block.unwrap();
-        let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap();
+    for block_root_and_slot in block_root_and_slot_iter {
+        let (block_root, slot) = block_root_and_slot.unwrap();
+        let fork_name = harness.spec.fork_name_at_slot::<E>(slot);
+        let data_columns = harness
+            .chain
+            .store
+            .get_data_columns(&block_root, fork_name)
+            .unwrap();
         assert!(data_columns.is_none())
     }

@@ -3434,14 +3444,14 @@ async fn test_import_historical_data_columns_batch() {
         .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc)
         .unwrap();

-    let block_root_iter = harness
+    let block_root_and_slot_iter = harness
         .chain
         .forwards_iter_block_roots_until(start_slot, end_slot)
         .unwrap();

     // Assert that data columns now exist for epoch 0
-    for block in block_root_iter {
-        let 
(block_root, _) = block.unwrap(); + for block_root_and_slot in block_root_and_slot_iter { + let (block_root, slot) = block_root_and_slot.unwrap(); if !harness .get_block(block_root.into()) .unwrap() @@ -3451,7 +3461,12 @@ async fn test_import_historical_data_columns_batch() { .unwrap() .is_empty() { - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + let fork_name = harness.spec.fork_name_at_slot::(slot); + let data_columns = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap(); assert!(data_columns.is_some()) }; } @@ -3479,7 +3494,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { .await; harness.advance_slot(); - let block_root_iter = harness + let block_root_and_slot_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); @@ -3488,14 +3503,23 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { // Get all data columns from start_slot to end_slot // and mutate the data columns with an invalid block root - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + for block_root_and_slot in block_root_and_slot_iter { + let (block_root, slot) = block_root_and_slot.unwrap(); + let fork_name = harness.spec.fork_name_at_slot::(slot); + let data_columns = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap(); for data_column in data_columns.unwrap_or_default() { let mut data_column = (*data_column).clone(); - if data_column.index % 2 == 0 { - data_column.signed_block_header.message.body_root = Hash256::ZERO; + if data_column.index() % 2 == 0 { + data_column + .signed_block_header_mut() + .unwrap() + .message + .body_root = Hash256::ZERO; } data_columns_list.push(Arc::new(data_column)); @@ -3520,15 +3544,20 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { .try_prune_blobs(true, Epoch::new(2)) .unwrap(); - let block_root_iter = harness + let block_root_and_slot_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); // Assert there are no columns between start_slot and end_slot - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + for block_root_and_slot in block_root_and_slot_iter { + let (block_root, slot) = block_root_and_slot.unwrap(); + let fork_name = harness.spec.fork_name_at_slot::(slot); + let data_columns = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap(); assert!(data_columns.is_none()) } @@ -3574,20 +3603,29 @@ async fn test_import_historical_data_columns_batch_no_block_found() { .await; harness.advance_slot(); - let block_root_iter = harness + let block_root_and_slot_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); let mut data_columns_list = vec![]; - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + for block_root_and_slot in block_root_and_slot_iter { + let (block_root, slot) = block_root_and_slot.unwrap(); + let fork_name = harness.spec.fork_name_at_slot::(slot); + let data_columns = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap(); for data_column in data_columns.unwrap_or_default() { let mut data_column = (*data_column).clone(); - 
data_column.signed_block_header.message.body_root = Hash256::ZERO; + data_column + .signed_block_header_mut() + .unwrap() + .message + .body_root = Hash256::ZERO; data_columns_list.push(Arc::new(data_column)); } } @@ -3610,14 +3648,19 @@ async fn test_import_historical_data_columns_batch_no_block_found() { .try_prune_blobs(true, Epoch::new(2)) .unwrap(); - let block_root_iter = harness + let block_root_and_slot_iter = harness .chain .forwards_iter_block_roots_until(start_slot, end_slot) .unwrap(); - for block in block_root_iter { - let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + for block_root_and_slot in block_root_and_slot_iter { + let (block_root, slot) = block_root_and_slot.unwrap(); + let fork_name = harness.spec.fork_name_at_slot::(slot); + let data_columns = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap(); assert!(data_columns.is_none()) } @@ -4996,7 +5039,13 @@ fn check_data_column_existence( .unwrap() .map(Result::unwrap) { - if let Some(columns) = harness.chain.store.get_data_columns(&block_root).unwrap() { + let fork_name = harness.spec.fork_name_at_slot::(slot); + if let Some(columns) = harness + .chain + .store + .get_data_columns(&block_root, fork_name) + .unwrap() + { assert!(should_exist, "columns at slot {slot} exist but should not"); columns_seen += columns.len(); } else { diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 6a0cdc33a08..e6b1ed08794 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -281,7 +281,9 @@ impl BlockId { warp_utils::reject::custom_not_found(format!("beacon block with root {}", root)) })?; - if !chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let fork_name = chain.spec.fork_name_at_epoch(block.epoch()); + + if !fork_name.fulu_enabled() { return Err(warp_utils::reject::custom_bad_request( "block is pre-Fulu and has no data columns".to_string(), )); @@ -290,12 +292,12 @@ impl BlockId { let data_column_sidecars = if let Some(indices) = query.indices { indices .iter() - .filter_map(|index| chain.get_data_column(&root, index).transpose()) + .filter_map(|index| chain.get_data_column(&root, index, fork_name).transpose()) .collect::, _>>() .map_err(warp_utils::reject::unhandled_error)? } else { chain - .get_data_columns(&root) + .get_data_columns(&root, fork_name) .map_err(warp_utils::reject::unhandled_error)? 
.unwrap_or_default() }; @@ -462,17 +464,18 @@ impl BlockId { let num_found_column_keys = column_indices.len(); let num_required_columns = T::EthSpec::number_of_columns() / 2; let is_blob_available = num_found_column_keys >= num_required_columns; + let fork_name = chain.spec.fork_name_at_epoch(block.epoch()); if is_blob_available { let data_columns = column_indices .into_iter() - .filter_map( - |column_index| match chain.get_data_column(&root, &column_index) { + .filter_map(|column_index| { + match chain.get_data_column(&root, &column_index, fork_name) { Ok(Some(data_column)) => Some(Ok(data_column)), Ok(None) => None, Err(e) => Some(Err(warp_utils::reject::unhandled_error(e))), - }, - ) + } + }) .collect::, _>>()?; reconstruct_blobs(&chain.kzg, data_columns, blob_indices, block, &chain.spec).map_err( diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index b54c071eb80..5d98a23da92 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -515,14 +515,14 @@ fn publish_column_sidecars( data_column_sidecars.shuffle(&mut **chain.rng.lock()); let dropped_indices = data_column_sidecars .drain(columns_to_keep..) - .map(|d| d.index) + .map(|d| *d.index()) .collect::>(); debug!(indices = ?dropped_indices, "Dropping data columns from publishing"); } let pubsub_messages = data_column_sidecars .into_iter() .map(|data_col| { - let subnet = DataColumnSubnetId::from_column_index(data_col.index, &chain.spec); + let subnet = DataColumnSubnetId::from_column_index(*data_col.index(), &chain.spec); PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) }) .collect::>(); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 3611f023917..36d9726dd9c 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -693,7 +693,7 @@ fn handle_rpc_response( Some(fork_name) => { if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( - DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + DataColumnSidecar::from_ssz_bytes_for_fork(decoded_buffer, fork_name)?, )))) } else { Err(RPCError::ErrorResponse( @@ -714,7 +714,7 @@ fn handle_rpc_response( Some(fork_name) => { if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( - DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + DataColumnSidecar::from_ssz_bytes_for_fork(decoded_buffer, fork_name)?, )))) } else { Err(RPCError::ErrorResponse( @@ -916,6 +916,7 @@ mod tests { SignedBeaconBlockHeader, Slot, data::{BlobIdentifier, Cell}, }; + use types::{BlobSidecar, DataColumnSidecarFulu}; type Spec = types::MainnetEthSpec; @@ -977,7 +978,7 @@ mod tests { fn empty_data_column_sidecar(spec: &ChainSpec) -> Arc> { // The context bytes are now derived from the block epoch, so we need to have the slot set // here. 
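The comment above and the block_id.rs hunks follow the same recipe that recurs throughout this diff: derive the epoch from the slot (or block), map it to a ForkName via the chain spec, and hand that to the fork-aware accessors and decoders. A toy version of the lookup, with a hypothetical activation epoch standing in for the spec's real schedule:

// Toy fork schedule; the real mapping is ChainSpec::fork_name_at_epoch.
#[derive(Debug, PartialEq, Clone, Copy)]
enum ForkName {
    Fulu,
    Gloas,
}

fn fork_name_at_epoch(epoch: u64) -> ForkName {
    const GLOAS_FORK_EPOCH: u64 = 512; // hypothetical value for illustration
    if epoch >= GLOAS_FORK_EPOCH {
        ForkName::Gloas
    } else {
        ForkName::Fulu
    }
}

fn main() {
    let slots_per_epoch = 32u64;
    let slot = 320u64;
    // Slot -> Epoch, as slot.epoch(E::slots_per_epoch()) does in the tests above.
    let epoch = slot / slots_per_epoch;
    assert_eq!(fork_name_at_epoch(epoch), ForkName::Fulu);
}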
- let data_column_sidecar = DataColumnSidecar { + let data_column_sidecar = DataColumnSidecar::Fulu(DataColumnSidecarFulu { index: 0, column: VariableList::new(vec![Cell::::default()]).unwrap(), kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), @@ -993,7 +994,7 @@ mod tests { signature: Signature::empty(), }, kzg_commitments_inclusion_proof: Default::default(), - }; + }); Arc::new(data_column_sidecar) } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 0539877c722..5a9a683b758 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,9 +15,9 @@ use superstruct::superstruct; use types::data::BlobIdentifier; use types::light_client::consts::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ - ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, - ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, data::BlobSidecar, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, + EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -762,12 +762,8 @@ impl RpcSuccessResponse { pub fn slot(&self) -> Option { match self { Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()), - Self::BlobsByRange(r) | Self::BlobsByRoot(r) => { - Some(r.signed_block_header.message.slot) - } - Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => { - Some(r.signed_block_header.message.slot) - } + Self::BlobsByRange(r) | Self::BlobsByRoot(r) => Some(r.slot()), + Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => Some(r.slot()), Self::LightClientBootstrap(r) => Some(r.get_slot()), Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 366515d42f6..f0ac9d00f9e 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -17,11 +17,12 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecarFulu, + DataColumnSidecarGloas, EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -640,10 +641,23 @@ pub fn rpc_data_column_limits( current_digest_epoch: Epoch, spec: &ChainSpec, ) -> RpcLimits { - RpcLimits::new( - 
DataColumnSidecar::::min_size(), - DataColumnSidecar::::max_size(spec.max_blobs_per_block(current_digest_epoch) as usize), - ) + let fork_name = spec.fork_name_at_epoch(current_digest_epoch); + + if fork_name.gloas_enabled() { + RpcLimits::new( + DataColumnSidecarGloas::::min_size(), + DataColumnSidecarGloas::::max_size( + spec.max_blobs_per_block(current_digest_epoch) as usize + ), + ) + } else { + RpcLimits::new( + DataColumnSidecarFulu::::min_size(), + DataColumnSidecarFulu::::max_size( + spec.max_blobs_per_block(current_digest_epoch) as usize + ), + ) + } } /* Inbound upgrade */ diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 231e71742f4..ffdbbc43be4 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -277,7 +277,7 @@ impl PubsubMessage { match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) { Some(fork) if fork.fulu_enabled() => { let col_sidecar = Arc::new( - DataColumnSidecar::from_ssz_bytes(data) + DataColumnSidecar::from_ssz_bytes_for_fork(data, *fork) .map_err(|e| format!("{:?}", e))?, ); Ok(PubsubMessage::DataColumnSidecar(Box::new(( @@ -437,7 +437,7 @@ impl std::fmt::Display for PubsubMessage { f, "DataColumnSidecar: slot: {}, column index: {}", data.1.slot(), - data.1.index, + data.1.index(), ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 2a17a04b905..a414b3dc63a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -17,9 +17,9 @@ use tokio::time::sleep; use tracing::{Instrument, debug, error, info, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSidecarGloas, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, EthSpec, ForkName, Hash256, KzgCommitment, + KzgProof, MinimalEthSpec, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; @@ -963,7 +963,8 @@ fn test_tcp_columns_by_root_chunked_rpc() { let messages_to_send = 32 * num_of_columns; let spec = Arc::new(spec_with_all_forks_enabled()); - let current_fork_name = ForkName::Fulu; + let slot = 320u64.into(); + let current_fork_name = spec.fork_name_at_slot::(slot); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -1007,30 +1008,44 @@ fn test_tcp_columns_by_root_chunked_rpc() { let rpc_request = RequestType::DataColumnsByRoot(req); // DataColumnsByRoot Response - let data_column = Arc::new(DataColumnSidecar { - index: 1, - signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader { - slot: 320u64.into(), - proposer_index: 1, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body_root: Hash256::zero(), + let data_column = if current_fork_name.gloas_enabled() { + Arc::new(DataColumnSidecar::Gloas(DataColumnSidecarGloas { + index: 1, + slot, + beacon_block_root: Hash256::zero(), + + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: 
vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), + })) + } else { + Arc::new(DataColumnSidecar::Fulu(DataColumnSidecarFulu { + index: 1, + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot, + proposer_index: 1, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), }, - signature: Signature::empty(), - }, - column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), + kzg_commitments_inclusion_proof: vec![ + Hash256::zero(); + E::kzg_commitments_inclusion_proof_depth() + ] .try_into() .unwrap(), - kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), - kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), - kzg_commitments_inclusion_proof: vec![ - Hash256::zero(); - E::kzg_commitments_inclusion_proof_depth() - ] - .try_into() - .unwrap(), - }); + })) + }; let rpc_response = Response::DataColumnsByRoot(Some(data_column.clone())); @@ -1129,7 +1144,8 @@ fn test_tcp_columns_by_range_chunked_rpc() { let messages_to_send = 32; let spec = Arc::new(spec_with_all_forks_enabled()); - let current_fork_name = ForkName::Fulu; + let slot: Slot = 320u64.into(); + let current_fork_name = spec.fork_name_at_slot::(slot); let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver @@ -1146,36 +1162,49 @@ fn test_tcp_columns_by_range_chunked_rpc() { // DataColumnsByRange Request let rpc_request = RequestType::DataColumnsByRange(DataColumnsByRangeRequest { - start_slot: 320, + start_slot: slot.as_u64(), count: 32, columns: (0..E::number_of_columns() as u64).collect(), }); // DataColumnsByRange Response - let data_column = Arc::new(DataColumnSidecar { - index: 1, - signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader { - slot: 320u64.into(), - proposer_index: 1, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body_root: Hash256::zero(), + let data_column = if current_fork_name.gloas_enabled() { + Arc::new(DataColumnSidecar::Gloas(DataColumnSidecarGloas { + index: 1, + slot, + beacon_block_root: Hash256::zero(), + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), + })) + } else { + Arc::new(DataColumnSidecar::Fulu(DataColumnSidecarFulu { + index: 1, + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader { + slot, + proposer_index: 1, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }, + signature: Signature::empty(), }, - signature: Signature::empty(), - }, - column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), + kzg_commitments_inclusion_proof: vec![ + Hash256::zero(); + E::kzg_commitments_inclusion_proof_depth() + ] .try_into() .unwrap(), - kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), - kzg_proofs: 
vec![KzgProof::empty()].try_into().unwrap(), - kzg_commitments_inclusion_proof: vec![ - Hash256::zero(); - E::kzg_commitments_inclusion_proof_depth() - ] - .try_into() - .unwrap(), - }); + })) + }; let rpc_response = Response::DataColumnsByRange(Some(data_column.clone())); diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ca259129348..6648131a083 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -609,7 +609,7 @@ impl NetworkBeaconProcessor { parent = None, level = "debug", skip_all, - fields(slot = %column_sidecar.slot(), block_root = ?column_sidecar.block_root(), index = column_sidecar.index), + fields(slot = %column_sidecar.slot(), block_root = ?column_sidecar.block_root(), index = column_sidecar.index()), )] pub async fn process_gossip_data_column_sidecar( self: &Arc, @@ -621,7 +621,7 @@ impl NetworkBeaconProcessor { ) { let slot = column_sidecar.slot(); let block_root = column_sidecar.block_root(); - let index = column_sidecar.index; + let index = *column_sidecar.index(); let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. metrics::observe_duration( @@ -667,6 +667,15 @@ impl NetworkBeaconProcessor { } Err(err) => { match err { + GossipDataColumnError::InvalidVariant => { + // TODO(gloas) we should probably penalize the peer here + debug!( + %slot, + %block_root, + %index, + "Invalid gossip data column variant." + ) + } GossipDataColumnError::PriorKnownUnpublished => { debug!( %slot, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index fd9c2c1e55c..5f1ba183915 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -980,7 +980,7 @@ impl NetworkBeaconProcessor { .into_iter() .map(|d| { let subnet = - DataColumnSubnetId::from_column_index(d.index, &chain.spec); + DataColumnSubnetId::from_column_index(*d.index(), &chain.spec); PubsubMessage::DataColumnSidecar(Box::new((subnet, d))) }) .collect(), diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index e443eb78d89..8d58e40ae6c 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -753,7 +753,10 @@ impl NetworkBeaconProcessor { ) .ok_or((RpcErrorResponse::ServerError, "shutting down"))? .await - .map_err(|_| (RpcErrorResponse::ServerError, "tokio join"))??; + .map_err(|_| (RpcErrorResponse::ServerError, "tokio join"))?? 
+            .iter()
+            .map(|(root, _)| *root)
+            .collect::<Vec<_>>();

         let current_slot = self
             .chain
@@ -866,7 +869,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         req_start_slot: u64,
         req_count: u64,
         req_type: &str,
-    ) -> Result<Vec<Hash256>, (RpcErrorResponse, &'static str)> {
+    ) -> Result<Vec<(Hash256, Slot)>, (RpcErrorResponse, &'static str)> {
         let start_time = std::time::Instant::now();
         let finalized_slot = self
             .chain
@@ -876,7 +879,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             .epoch
             .start_slot(T::EthSpec::slots_per_epoch());

-        let (block_roots, source) = if req_start_slot >= finalized_slot.as_u64() {
+        let (block_roots_and_slots, source) = if req_start_slot >= finalized_slot.as_u64() {
             // If the entire requested range is after finalization, use fork_choice
             (
                 self.chain
@@ -920,14 +923,14 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             req_type,
             start_slot = %req_start_slot,
             req_count,
-            roots_count = block_roots.len(),
+            roots_count = block_roots_and_slots.len(),
             source,
             elapsed = ?elapsed,
             %finalized_slot,
             "Range request block roots retrieved"
         );

-        Ok(block_roots)
+        Ok(block_roots_and_slots)
     }

     /// Get block roots for a `BlocksByRangeRequest` from the store using roots iterator.
@@ -935,7 +938,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         &self,
         start_slot: u64,
         count: u64,
-    ) -> Result<Vec<Hash256>, (RpcErrorResponse, &'static str)> {
+    ) -> Result<Vec<(Hash256, Slot)>, (RpcErrorResponse, &'static str)> {
         let forwards_block_root_iter =
             match self.chain.forwards_iter_block_roots(Slot::from(start_slot)) {
                 Ok(iter) => iter,
@@ -981,11 +984,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         };

         // remove all skip slots i.e. duplicated roots
-        Ok(block_roots
-            .into_iter()
-            .map(|(root, _)| root)
-            .unique()
-            .collect::<Vec<_>>())
+        Ok(block_roots.into_iter().unique().collect::<Vec<_>>())
     }

     /// Handle a `BlobsByRange` request from the peer.
@@ -1092,7 +1091,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             };
         }

-        let block_roots =
+        let block_roots_and_slots =
             self.get_block_roots_for_slot_range(req.start_slot, effective_count, "BlobsByRange")?;

         let current_slot = self
@@ -1113,7 +1112,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         let mut blobs_sent = 0;

-        for root in block_roots {
+        for (root, _) in block_roots_and_slots {
             match self.chain.get_blobs(&root) {
                 Ok(blob_sidecar_list) => {
                     for blob_sidecar in blob_sidecar_list.iter() {
@@ -1252,7 +1251,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             };
         }

-        let block_roots =
+        let block_roots_and_slots =
             self.get_block_roots_for_slot_range(req.start_slot, req.count, "DataColumnsByRange")?;

         let mut data_columns_sent = 0;
@@ -1269,9 +1268,10 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             .filter(|c| available_columns.contains(c))
             .collect::<Vec<_>>();

-        for root in block_roots {
+        for (root, slot) in block_roots_and_slots {
+            let fork_name = self.chain.spec.fork_name_at_slot::<T::EthSpec>(slot);
             for index in &indices_to_retrieve {
-                match self.chain.get_data_column(&root, index) {
+                match self.chain.get_data_column(&root, index, fork_name) {
                     Ok(Some(data_column_sidecar)) => {
                         // Due to skip slots, data columns could be out of the range; we ensure they
                         // are in the range before sending
diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
index 38de6dffada..146bb90965c 100644
--- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
@@ -374,7 +374,10 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay);
         }

-        let mut indices = custody_columns.iter().map(|d| d.index).collect::<Vec<_>>();
+        let mut indices = custody_columns
+            .iter()
+            .map(|d| 
*d.index()) + .collect::>(); indices.sort_unstable(); debug!( ?indices, diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 7b8554662b8..0b6989be747 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -10,7 +10,7 @@ use crate::{ }; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::custody_context::NodeCustodyType; -use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip; +use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip_fulu; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; use beacon_chain::observed_data_sidecars::DoNotObserve; use beacon_chain::test_utils::{ @@ -39,12 +39,14 @@ use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::data::{BlobIdentifier, FixedBlobSidecarList}; use types::{ - AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, - DataColumnSubnetId, Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, - SubnetId, + AttesterSlashing, BlobSidecar, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, Epoch, + EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, +}; +use types::{ + BlobSidecarList, + data::{BlobIdentifier, FixedBlobSidecarList}, }; type E = MainnetEthSpec; @@ -309,7 +311,7 @@ impl TestRig { ) .unwrap() .into_iter() - .filter(|c| sampling_indices.contains(&c.index)) + .filter(|c| sampling_indices.contains(c.index())) .collect::>(); (None, Some(custody_columns)) @@ -386,7 +388,7 @@ impl TestRig { .send_gossip_data_column_sidecar( junk_message_id(), junk_peer_id(), - DataColumnSubnetId::from_column_index(data_column.index, &self.chain.spec), + DataColumnSubnetId::from_column_index(*data_column.index(), &self.chain.spec), data_column.clone(), Duration::from_secs(0), ) @@ -1115,8 +1117,8 @@ async fn accept_processed_gossip_data_columns_without_import() { .into_iter() .map(|data_column| { let subnet_id = - DataColumnSubnetId::from_column_index(data_column.index, &rig.chain.spec); - validate_data_column_sidecar_for_gossip::<_, DoNotObserve>( + DataColumnSubnetId::from_column_index(*data_column.index(), &rig.chain.spec); + validate_data_column_sidecar_for_gossip_fulu::<_, DoNotObserve>( data_column, subnet_id, &rig.chain, @@ -1948,7 +1950,7 @@ async fn test_data_columns_by_range_request_only_returns_requested_columns() { } = next { if let Some(column) = data_column { - received_columns.push(column.index); + received_columns.push(*column.index()); } else { break; } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f8ffd298caf..9065f05753d 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -88,7 +88,11 @@ impl BlockComponent { match self { BlockComponent::Block(block) => block.value.parent_root(), BlockComponent::Blob(blob) => blob.value.block_parent_root(), - BlockComponent::DataColumn(column) => column.value.block_parent_root(), + BlockComponent::DataColumn(column) => match column.value.as_ref() { + DataColumnSidecar::Fulu(column) => column.block_parent_root(), + // TODO(gloas) we don't have 
a parent root post-gloas; not sure what to do here
+                DataColumnSidecar::Gloas(column) => column.beacon_block_root,
+            },
         }
     }
     fn get_type(&self) -> &'static str {
diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs
index 6f563820f7c..27e334fa104 100644
--- a/beacon_node/network/src/sync/block_sidecar_coupling.rs
+++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs
@@ -346,7 +346,7 @@ impl<E: EthSpec> RangeBlockComponentsRequest<E> {
         for column in data_columns {
             let block_root = column.block_root();
-            let index = column.index;
+            let index = *column.index();
             if data_columns_by_block
                 .entry(block_root)
                 .or_default()
@@ -624,7 +624,7 @@ mod tests {
                 *req,
                 blocks
                     .iter()
-                    .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned())
+                    .flat_map(|b| b.1.iter().filter(|d| *d.index() == column_index).cloned())
                     .collect(),
             )
             .unwrap();
@@ -707,7 +707,7 @@ mod tests {
                     .iter()
                     .flat_map(|b| {
                         b.1.iter()
-                            .filter(|d| column_indices.contains(&d.index))
+                            .filter(|d| column_indices.contains(d.index()))
                             .cloned()
                     })
                     .collect::<Vec<_>>(),
@@ -779,7 +779,7 @@ mod tests {
                 *req,
                 blocks
                     .iter()
-                    .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned())
+                    .flat_map(|b| b.1.iter().filter(|d| *d.index() == column_index).cloned())
                     .collect(),
             )
             .unwrap();
@@ -864,7 +864,7 @@ mod tests {
                 *req1,
                 blocks
                     .iter()
-                    .flat_map(|b| b.1.iter().filter(|d| d.index == 1).cloned())
+                    .flat_map(|b| b.1.iter().filter(|d| *d.index() == 1).cloned())
                     .collect(),
             )
             .unwrap();
@@ -891,7 +891,7 @@ mod tests {
                 new_columns_req_id,
                 blocks
                     .iter()
-                    .flat_map(|b| b.1.iter().filter(|d| d.index == 2).cloned())
+                    .flat_map(|b| b.1.iter().filter(|d| *d.index() == 2).cloned())
                     .collect(),
             )
             .unwrap();
@@ -957,7 +957,7 @@ mod tests {
                 *req1,
                 blocks
                     .iter()
-                    .flat_map(|b| b.1.iter().filter(|d| d.index == 1).cloned())
+                    .flat_map(|b| b.1.iter().filter(|d| *d.index() == 1).cloned())
                     .collect(),
             )
             .unwrap();
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index 338f21ce987..096ed9c3282 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -871,20 +871,28 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             SyncMessage::UnknownParentDataColumn(peer_id, data_column) => {
                 let data_column_slot = data_column.slot();
                 let block_root = data_column.block_root();
-                let parent_root = data_column.block_parent_root();
-                debug!(%block_root, %parent_root, "Received unknown parent data column message");
-                self.handle_unknown_parent(
-                    peer_id,
-                    block_root,
-                    parent_root,
-                    data_column_slot,
-                    BlockComponent::DataColumn(DownloadResult {
-                        value: data_column,
-                        block_root,
-                        seen_timestamp: timestamp_now(),
-                        peer_group: PeerGroup::from_single(peer_id),
-                    }),
-                );
+                match data_column.as_ref() {
+                    DataColumnSidecar::Fulu(column) => {
+                        let parent_root = column.block_parent_root();
+                        debug!(%block_root, %parent_root, "Received unknown parent data column message");
+                        self.handle_unknown_parent(
+                            peer_id,
+                            block_root,
+                            parent_root,
+                            data_column_slot,
+                            BlockComponent::DataColumn(DownloadResult {
+                                value: data_column,
+                                block_root,
+                                seen_timestamp: timestamp_now(),
+                                peer_group: PeerGroup::from_single(peer_id),
+                            }),
+                        );
+                    }
+                    // TODO(gloas) support gloas data column variant
+                    DataColumnSidecar::Gloas(_) => {
+                        error!("Gloas variant not yet supported")
+                    }
+                }
             }
             SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_root) => {
                 if !self.notified_unknown_roots.contains(&(peer_id, block_root)) {
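The Fulu-only arm above reflects the structural difference driving most of this diff: a Fulu sidecar's signed header yields a parent root, while a gloas sidecar only identifies its own block. A reduced illustration of that dispatch, using stand-in types rather than the real containers:

// Illustrative stand-ins only; the real containers live in the `types` crate.
type Hash256 = [u8; 32];

enum DataColumnSidecar {
    Fulu { block_parent_root: Hash256 },
    Gloas { beacon_block_root: Hash256 },
}

// Pre-gloas we can walk to the parent; post-gloas the closest anchor the
// sidecar offers is the beacon block root itself (as in BlockComponent::parent_root).
fn parent_or_block_root(sidecar: &DataColumnSidecar) -> Hash256 {
    match sidecar {
        DataColumnSidecar::Fulu { block_parent_root } => *block_parent_root,
        DataColumnSidecar::Gloas { beacon_block_root } => *beacon_block_root,
    }
}

fn main() {
    let gloas = DataColumnSidecar::Gloas { beacon_block_root: [7; 32] };
    assert_eq!(parent_or_block_root(&gloas), [7; 32]);
}

diff --git 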
a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index fe0924fae86..b91028a8f6d 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -128,7 +128,7 @@ impl ActiveCustodyRequest { // requested index. The worse case is 128 loops over a 128 item vec + mutation to // drop the consumed columns. let mut data_columns = HashMap::::from_iter( - data_columns.into_iter().map(|d| (d.index, d)), + data_columns.into_iter().map(|d| (*d.index(), d)), ); // Accumulate columns that the peer does not have to issue a single log per request let mut missing_column_indexes = vec![]; @@ -210,7 +210,7 @@ impl ActiveCustodyRequest { peers .entry(peer) .or_default() - .push(data_column.index as usize); + .push(*data_column.index() as usize); seen_timestamps.push(seen_timestamp); Ok(data_column) }) diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs index 9dabb2defa0..74bdd0a1143 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs @@ -28,18 +28,22 @@ impl ActiveRequestItems for DataColumnsByRangeRequestItems { { return Err(LookupVerifyError::UnrequestedSlot(data_column.slot())); } - if !self.request.columns.contains(&data_column.index) { - return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); + if !self.request.columns.contains(data_column.index()) { + return Err(LookupVerifyError::UnrequestedIndex(*data_column.index())); } - if !data_column.verify_inclusion_proof() { + + if let DataColumnSidecar::Fulu(data_column) = data_column.as_ref() + && !data_column.verify_inclusion_proof() + { return Err(LookupVerifyError::InvalidInclusionProof); } + if self.items.iter().any(|existing| { - existing.slot() == data_column.slot() && existing.index == data_column.index + existing.slot() == data_column.slot() && *existing.index() == *data_column.index() }) { return Err(LookupVerifyError::DuplicatedData( data_column.slot(), - data_column.index, + *data_column.index(), )); } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index 34df801eaa8..5ad0f377c10 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -56,16 +56,24 @@ impl ActiveRequestItems for DataColumnsByRootRequestItems { if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - if !data_column.verify_inclusion_proof() { + + if let DataColumnSidecar::Fulu(data_column) = data_column.as_ref() + && !data_column.verify_inclusion_proof() + { return Err(LookupVerifyError::InvalidInclusionProof); } - if !self.request.indices.contains(&data_column.index) { - return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); + + if !self.request.indices.contains(data_column.index()) { + return Err(LookupVerifyError::UnrequestedIndex(*data_column.index())); } - if self.items.iter().any(|d| d.index == data_column.index) { + if self + .items + .iter() + .any(|d| *d.index() == *data_column.index()) + { return Err(LookupVerifyError::DuplicatedData( data_column.slot(), - data_column.index, + 
data_column.index,
+                *data_column.index(),
             ));
         }
diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs
index b912a6badc9..4a6987a752b 100644
--- a/beacon_node/network/src/sync/range_data_column_batch_request.rs
+++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs
@@ -189,9 +189,15 @@ impl RangeDataColumnBatchRequest {
             .unique()
             .collect::<Vec<_>>();

+        // TODO(gloas) no block signatures to check post-gloas; double-check what to do here
         let column_block_signatures = columns
             .iter()
-            .map(|column| column.signed_block_header.signature.clone())
+            .filter_map(|column| match column.as_ref() {
+                DataColumnSidecar::Fulu(column) => {
+                    Some(column.signed_block_header.signature.clone())
+                }
+                _ => None,
+            })
             .unique()
             .collect::<Vec<_>>();

@@ -201,8 +207,8 @@ impl RangeDataColumnBatchRequest {
             // If there are no block roots, penalize all peers
             [] => {
                 for column in &columns {
-                    if let Some(naughty_peer) = column_to_peer.get(&column.index) {
-                        naughty_peers.push((column.index, *naughty_peer));
+                    if let Some(naughty_peer) = column_to_peer.get(column.index()) {
+                        naughty_peers.push((*column.index(), *naughty_peer));
                     }
                 }
                 continue;
@@ -212,9 +218,9 @@ impl RangeDataColumnBatchRequest {
                 for column in columns {
                     if column_block_roots.contains(&column.block_root())
                         && block_root != column.block_root()
-                        && let Some(naughty_peer) = column_to_peer.get(&column.index)
+                        && let Some(naughty_peer) = column_to_peer.get(column.index())
                     {
-                        naughty_peers.push((column.index, *naughty_peer));
+                        naughty_peers.push((*column.index(), *naughty_peer));
                     }
                 }
                 continue;
@@ -227,17 +233,19 @@ impl RangeDataColumnBatchRequest {
             // If there are no block signatures, penalize all peers
             [] => {
                 for column in &columns {
-                    if let Some(naughty_peer) = column_to_peer.get(&column.index) {
-                        naughty_peers.push((column.index, *naughty_peer));
+                    if let Some(naughty_peer) = column_to_peer.get(column.index()) {
+                        naughty_peers.push((*column.index(), *naughty_peer));
                     }
                 }
                 continue;
             }
             // If there's more than one unique block signature, penalize the peers serving the
-            // invalid block signatures.
+            // invalid block signatures. This check is only relevant for Fulu. 
             column_block_signatures => {
                 for column in columns {
-                    if column_block_signatures.contains(&column.signed_block_header.signature)
+                    if let DataColumnSidecar::Fulu(column) = column.as_ref()
+                        && column_block_signatures
+                            .contains(&column.signed_block_header.signature)
                         && block.signature() != &column.signed_block_header.signature
                         && let Some(naughty_peer) = column_to_peer.get(&column.index)
                     {
@@ -251,8 +259,8 @@
         // If the block root doesn't match the columns' block root, penalize the peers
         if block_root != column_block_root {
             for column in &columns {
-                if let Some(naughty_peer) = column_to_peer.get(&column.index) {
-                    naughty_peers.push((column.index, *naughty_peer));
+                if let Some(naughty_peer) = column_to_peer.get(column.index()) {
+                    naughty_peers.push((*column.index(), *naughty_peer));
                 }
             }
         }
@@ -260,13 +268,13 @@
         // If the block signature doesn't match the columns' block signature, penalize the peers
         if block.signature() != column_block_signature {
             for column in &columns {
-                if let Some(naughty_peer) = column_to_peer.get(&column.index) {
-                    naughty_peers.push((column.index, *naughty_peer));
+                if let Some(naughty_peer) = column_to_peer.get(column.index()) {
+                    naughty_peers.push((*column.index(), *naughty_peer));
                 }
             }
         }

-        let received_columns = columns.iter().map(|c| c.index).collect::<HashSet<_>>();
+        let received_columns = columns.iter().map(|c| *c.index()).collect::<HashSet<_>>();

         let missing_columns = expected_custody_columns
             .difference(&received_columns)
diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs
index cb728a90c1b..9cda9fec95c 100644
--- a/beacon_node/network/src/sync/tests/range.rs
+++ b/beacon_node/network/src/sync/tests/range.rs
@@ -393,7 +393,7 @@ impl TestRig {
         let data_sidecars = if fork.fulu_enabled() {
             store
-                .get_data_columns(&block_root)
+                .get_data_columns(&block_root, fork)
                 .unwrap()
                 .map(|columns| {
                     columns
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 8eec4d5eceb..fd6190d8e7f 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -114,7 +114,7 @@ impl<E: EthSpec> BlockCache<E> {
     pub fn put_data_column(&mut self, block_root: Hash256, data_column: Arc<DataColumnSidecar<E>>) {
         self.data_column_cache
             .get_or_insert_mut(block_root, Default::default)
-            .insert(data_column.index, data_column);
+            .insert(*data_column.index(), data_column);
     }
     pub fn put_data_column_custody_info(
         &mut self,
@@ -969,7 +969,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     ) {
         ops.push(KeyValueStoreOp::PutKeyValue(
             DBColumn::BeaconDataColumn,
-            get_data_column_key(block_root, &data_column.index),
+            get_data_column_key(block_root, data_column.index()),
             data_column.as_ssz_bytes(),
         ));
     }
@@ -1002,7 +1002,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         for data_column in data_columns {
             self.blobs_db.put_bytes(
                 DBColumn::BeaconDataColumn,
-                &get_data_column_key(block_root, &data_column.index),
+                &get_data_column_key(block_root, data_column.index()),
                 &data_column.as_ssz_bytes(),
             )?;
             self.block_cache
@@ -1021,7 +1021,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         for data_column in data_columns {
             ops.push(KeyValueStoreOp::PutKeyValue(
                 DBColumn::BeaconDataColumn,
-                get_data_column_key(block_root, &data_column.index),
+                get_data_column_key(block_root, data_column.index()),
                 data_column.as_ssz_bytes(),
             ));
         }
@@ -1301,7 +1301,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                 ));
             }

-            StoreOp::DeleteDataColumns(block_root, column_indices) => {
+            StoreOp::DeleteDataColumns(block_root, column_indices, _) => {
                 for index in column_indices {
                     let key = get_data_column_key(&block_root, &index);
                     key_value_batch
@@ -1415,10 +1415,10 @@
                 }
                 true
             }
-            StoreOp::DeleteDataColumns(block_root, indices) => {
+            StoreOp::DeleteDataColumns(block_root, indices, fork_name) => {
                 match indices
                     .iter()
-                    .map(|index| self.get_data_column(block_root, index))
+                    .map(|index| self.get_data_column(block_root, index, *fork_name))
                    .collect::<Result<Vec<_>, _>>()
                 {
                     Ok(data_column_sidecar_list_opt) => {
@@ -1471,14 +1471,22 @@
             let reverse_op = match op {
                 StoreOp::PutBlobs(block_root, _) => StoreOp::DeleteBlobs(*block_root),
                 StoreOp::PutDataColumns(block_root, data_columns) => {
-                    let indices = data_columns.iter().map(|c| c.index).collect();
-                    StoreOp::DeleteDataColumns(*block_root, indices)
+                    let indices = data_columns.iter().map(|c| *c.index()).collect();
+                    if let Some(column) = data_columns.first() {
+                        let slot = column.slot();
+                        let fork_name = self.spec.fork_name_at_slot::<E>(slot);
+                        StoreOp::DeleteDataColumns(*block_root, indices, fork_name)
+                    } else {
+                        return Err(Error::DBError {
+                            message: "Failed to rollback data columns".to_owned(),
+                        });
+                    }
                 }
                 StoreOp::DeleteBlobs(_) => match blobs_to_delete.pop() {
                     Some((block_root, blobs)) => StoreOp::PutBlobs(block_root, blobs),
                     None => return Err(HotColdDBError::Rollback.into()),
                 },
-                StoreOp::DeleteDataColumns(_, _) => match data_columns_to_delete.pop() {
+                StoreOp::DeleteDataColumns(_, _, _) => match data_columns_to_delete.pop() {
                     Some((block_root, data_columns)) => {
                         StoreOp::PutDataColumns(block_root, data_columns)
                     }
@@ -1530,7 +1538,7 @@
             StoreOp::DeleteBlobs(_) => (),

-            StoreOp::DeleteDataColumns(_, _) => (),
+            StoreOp::DeleteDataColumns(_, _, _) => (),

             StoreOp::DeleteExecutionPayload(_) => (),
@@ -2506,12 +2514,16 @@
     pub fn get_data_columns(
         &self,
         block_root: &Hash256,
+        fork_name: ForkName,
     ) -> Result<Option<DataColumnSidecarList<E>>, Error> {
         let column_indices = self.get_data_column_keys(*block_root)?;

         let columns: DataColumnSidecarList<E> = column_indices
             .into_iter()
-            .filter_map(|col_index| self.get_data_column(block_root, &col_index).transpose())
+            .filter_map(|col_index| {
+                self.get_data_column(block_root, &col_index, fork_name)
+                    .transpose()
+            })
             .collect::<Result<_, _>>()?;

         Ok((!columns.is_empty()).then_some(columns))
@@ -2585,6 +2597,7 @@
         &self,
         block_root: &Hash256,
         column_index: &ColumnIndex,
+        fork_name: ForkName,
     ) -> Result<Option<Arc<DataColumnSidecar<E>>>, Error> {
         // Check the cache.
         if let Some(data_column) = self
@@ -2601,7 +2614,10 @@
             &get_data_column_key(block_root, column_index),
         )? {
             Some(ref data_column_bytes) => {
-                let data_column = Arc::new(DataColumnSidecar::from_ssz_bytes(data_column_bytes)?);
+                let data_column = Arc::new(DataColumnSidecar::from_ssz_bytes_for_fork(
+                    data_column_bytes,
+                    fork_name,
+                )?);
                 self.block_cache.as_ref().inspect(|cache| {
                     cache
                         .lock()
diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs
index ae5b2e1e571..83ca43ebaa5 100644
--- a/beacon_node/store/src/lib.rs
+++ b/beacon_node/store/src/lib.rs
@@ -237,7 +237,7 @@ pub enum StoreOp<'a, E: EthSpec> {
     PutStateSummary(Hash256, HotStateSummary),
     DeleteBlock(Hash256),
     DeleteBlobs(Hash256),
-    DeleteDataColumns(Hash256, Vec<ColumnIndex>),
+    DeleteDataColumns(Hash256, Vec<ColumnIndex>, ForkName),
     DeleteState(Hash256, Option<Slot>),
     DeleteExecutionPayload(Hash256),
     DeleteSyncCommitteeBranch(Hash256),
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index b1a61ce00cc..c1572ca354c 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -1016,14 +1016,14 @@ impl SseDataColumnSidecar {
     pub fn from_data_column_sidecar<E: EthSpec>(
         data_column_sidecar: &DataColumnSidecar<E>,
     ) -> SseDataColumnSidecar {
-        let kzg_commitments = data_column_sidecar.kzg_commitments.to_vec();
+        let kzg_commitments = data_column_sidecar.kzg_commitments().to_vec();
         let versioned_hashes = kzg_commitments
             .iter()
             .map(|c| c.calculate_versioned_hash())
             .collect();
         SseDataColumnSidecar {
             block_root: data_column_sidecar.block_root(),
-            index: data_column_sidecar.index,
+            index: *data_column_sidecar.index(),
             slot: data_column_sidecar.slot(),
             kzg_commitments,
             versioned_hashes,
diff --git a/consensus/types/src/data/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs
index 71d821f83ef..bb18e0a180b 100644
--- a/consensus/types/src/data/data_column_sidecar.rs
+++ b/consensus/types/src/data/data_column_sidecar.rs
@@ -7,10 +7,11 @@ use kzg::{KzgCommitment, KzgProof};
 use merkle_proof::verify_merkle_proof;
 use safe_arith::ArithError;
 use serde::{Deserialize, Serialize};
-use ssz::Encode;
+use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use ssz_types::Error as SszError;
 use ssz_types::{FixedVector, VariableList};
+use superstruct::superstruct;
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
 use tree_hash_derive::TreeHash;
@@ -38,15 +39,49 @@ pub struct DataColumnsByRootIdentifier {

 pub type DataColumnSidecarList<E> = Vec<Arc<DataColumnSidecar<E>>>;

+#[derive(Debug, PartialEq, Clone)]
+pub enum Error {
+    IncorrectStateVariant,
+}
+
+#[superstruct(
+    variants(Fulu, Gloas),
+    variant_attributes(
+        derive(
+            Debug,
+            Clone,
+            Serialize,
+            Deserialize,
+            Decode,
+            Encode,
+            TestRandom,
+            Educe,
+            TreeHash,
+        ),
+        context_deserialize(ForkName),
+        educe(PartialEq, Hash(bound(E: EthSpec))),
+        serde(bound = "E: EthSpec", deny_unknown_fields),
+        cfg_attr(
+            feature = "arbitrary",
+            derive(arbitrary::Arbitrary),
+            arbitrary(bound = "E: EthSpec")
+        )
+    ),
+    ref_attributes(derive(TreeHash), tree_hash(enum_behaviour = "transparent")),
+    cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"),
+    partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant")
+)]
 #[cfg_attr(
     feature = "arbitrary",
     derive(arbitrary::Arbitrary),
     arbitrary(bound = "E: EthSpec")
 )]
-#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)]
-#[serde(bound = "E: EthSpec")]
-#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))]
-#[context_deserialize(ForkName)]
+#[derive(Debug, Clone, Serialize, TreeHash, Encode, Decode, Educe, Deserialize)]
+#[educe(PartialEq, Hash(bound(E: EthSpec)))]
+#[serde(untagged)]
+#[tree_hash(enum_behaviour = "transparent")]
+#[ssz(enum_behaviour = "transparent")]
+#[serde(bound = "E: EthSpec", deny_unknown_fields)]
 pub struct DataColumnSidecar<E: EthSpec> {
     #[serde(with = "serde_utils::quoted_u64")]
     pub index: ColumnIndex,
@@ -55,20 +90,58 @@ pub struct DataColumnSidecar<E: EthSpec> {
     /// All the KZG commitments and proofs associated with the block, used for verifying sample cells.
     pub kzg_commitments: KzgCommitments<E>,
     pub kzg_proofs: VariableList<KzgProof, E::MaxBlobCommitmentsPerBlock>,
+    #[superstruct(only(Fulu))]
     pub signed_block_header: SignedBeaconBlockHeader,
     /// An inclusion proof, proving the inclusion of `blob_kzg_commitments` in `BeaconBlockBody`.
+    #[superstruct(only(Fulu))]
     pub kzg_commitments_inclusion_proof: FixedVector<Hash256, E::KzgCommitmentsInclusionProofDepth>,
+    #[superstruct(only(Gloas), partial_getter(rename = "slot_gloas"))]
+    pub slot: Slot,
+    #[superstruct(only(Gloas))]
+    pub beacon_block_root: Hash256,
 }

 impl<E: EthSpec> DataColumnSidecar<E> {
     pub fn slot(&self) -> Slot {
-        self.signed_block_header.message.slot
+        match self {
+            DataColumnSidecar::Fulu(column) => column.slot(),
+            DataColumnSidecar::Gloas(column) => column.slot,
+        }
     }

     pub fn epoch(&self) -> Epoch {
         self.slot().epoch(E::slots_per_epoch())
     }

+    pub fn block_root(&self) -> Hash256 {
+        match self {
+            DataColumnSidecar::Fulu(column) => column.block_root(),
+            DataColumnSidecar::Gloas(column) => column.beacon_block_root,
+        }
+    }
+
+    /// Custom SSZ decoder that takes a `ForkName` as context.
+    pub fn from_ssz_bytes_for_fork(
+        bytes: &[u8],
+        fork_name: ForkName,
+    ) -> Result<Self, ssz::DecodeError> {
+        if fork_name.gloas_enabled() {
+            Ok(DataColumnSidecar::Gloas(
+                DataColumnSidecarGloas::from_ssz_bytes(bytes)?,
+            ))
+        } else {
+            Ok(DataColumnSidecar::Fulu(
+                DataColumnSidecarFulu::from_ssz_bytes(bytes)?,
+            ))
+        }
+    }
+}
+
+impl<E: EthSpec> DataColumnSidecarFulu<E> {
+    pub fn slot(&self) -> Slot {
+        self.signed_block_header.message.slot
+    }
+
     pub fn block_root(&self) -> Hash256 {
         self.signed_block_header.message.tree_hash_root()
     }
@@ -132,6 +205,39 @@ impl<E: EthSpec> DataColumnSidecar<E> {
     }
 }

+impl<E: EthSpec> DataColumnSidecarGloas<E> {
+    pub fn min_size() -> usize {
+        // min size is one cell
+        Self {
+            index: 0,
+            column: VariableList::new(vec![Cell::<E>::default()]).unwrap(),
+            kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(),
+            kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(),
+            slot: Slot::new(0),
+            beacon_block_root: Hash256::ZERO,
+        }
+        .as_ssz_bytes()
+        .len()
+    }
+
+    pub fn max_size(max_blobs_per_block: usize) -> usize {
+        Self {
+            index: 0,
+            column: VariableList::new(vec![Cell::<E>::default(); max_blobs_per_block]).unwrap(),
+            kzg_commitments: VariableList::new(vec![
+                KzgCommitment::empty_for_testing();
+                max_blobs_per_block
+            ])
+            .unwrap(),
+            kzg_proofs: VariableList::new(vec![KzgProof::empty(); max_blobs_per_block]).unwrap(),
+            slot: Slot::new(0),
+            beacon_block_root: Hash256::ZERO,
+        }
+        .as_ssz_bytes()
+        .len()
+    }
+}
+
 #[derive(Debug)]
 pub enum DataColumnSidecarError {
     ArithError(ArithError),
diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs
index 10d062bada9..eab16896496 100644
--- a/consensus/types/src/data/mod.rs
+++ b/consensus/types/src/data/mod.rs
@@ -13,7 +13,8 @@ pub use data_column_custody_group::{
 };
 pub use data_column_sidecar::{
     Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError,
-    DataColumnSidecarList, DataColumnsByRootIdentifier,
+    DataColumnSidecarFulu, DataColumnSidecarGloas, DataColumnSidecarList,
+    DataColumnsByRootIdentifier,
 };
 pub use data_column_subnet_id::DataColumnSubnetId;
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index 93a8805240a..b0b01bceff7 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -530,7 +530,8 @@ impl<E: EthSpec> Tester<E> {
         let gossip_verified_data_columns = columns
             .into_iter()
             .map(|column| {
-                let subnet_id = DataColumnSubnetId::from_column_index(column.index, &self.spec);
+                let subnet_id =
+                    DataColumnSubnetId::from_column_index(*column.index(), &self.spec);
                 GossipVerifiedDataColumn::new(column.clone(), subnet_id, &self.harness.chain)
                     .unwrap_or_else(|_| {
                         data_column_success = false;
diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs
index b974a17a804..afa6304eaec 100644
--- a/testing/ef_tests/src/handler.rs
+++ b/testing/ef_tests/src/handler.rs
@@ -305,6 +305,10 @@ impl<T, E> SszStaticHandler<T, E> {
         Self::for_forks(vec![ForkName::Fulu])
     }

+    pub fn gloas_only() -> Self {
+        Self::for_forks(vec![ForkName::Gloas])
+    }
+
     pub fn altair_and_later() -> Self {
         Self::for_forks(ForkName::list_all()[1..].to_vec())
     }
diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs
index 6dd1df1d8d1..87d56968cc3 100644
--- a/testing/ef_tests/src/type_name.rs
+++ b/testing/ef_tests/src/type_name.rs
@@ -61,6 +61,8 @@
 type_name!(BlobIdentifier);
 type_name_generic!(DataColumnsByRootIdentifier, "DataColumnsByRootIdentifier");
 type_name_generic!(BlobSidecar);
 type_name_generic!(DataColumnSidecar);
+type_name_generic!(DataColumnSidecarFulu, "DataColumnSidecar");
+type_name_generic!(DataColumnSidecarGloas, "DataColumnSidecar");
 type_name!(Checkpoint);
 type_name!(ConsolidationRequest);
 type_name_generic!(ContributionAndProof);
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index b2f65db6f3e..0af350f1dbd 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -241,8 +241,9 @@ mod ssz_static {
     use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler};
     use types::state::HistoricalSummary;
     use types::{
-        AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest,
-        LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal, WithdrawalRequest, *,
+        AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DataColumnSidecarFulu,
+        DepositRequest, LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal,
+        WithdrawalRequest, *,
     };

     ssz_static_test!(attestation_data, AttestationData);
@@ -659,9 +660,9 @@
     #[test]
     fn data_column_sidecar() {
-        SszStaticHandler::<DataColumnSidecar<MinimalEthSpec>, MinimalEthSpec>::fulu_and_later()
+        SszStaticHandler::<DataColumnSidecarFulu<MinimalEthSpec>, MinimalEthSpec>::fulu_only()
             .run();
-        SszStaticHandler::<DataColumnSidecar<MainnetEthSpec>, MainnetEthSpec>::fulu_and_later()
+        SszStaticHandler::<DataColumnSidecarFulu<MainnetEthSpec>, MainnetEthSpec>::fulu_only()
             .run();
     }
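
Reviewer note (not part of the patch): a minimal sketch of how the new fork-aware decode path is meant to be driven. The helper name `decode_column_at_slot` is hypothetical; `fork_name_at_slot`, `from_ssz_bytes_for_fork`, and the `gloas_enabled` dispatch are taken from the changes above.

    use ssz::DecodeError;
    use types::{ChainSpec, DataColumnSidecar, EthSpec, Slot};

    /// Decode a stored data column sidecar. With `#[ssz(enum_behaviour =
    /// "transparent")]` the on-disk bytes are not self-describing (Fulu
    /// carries a signed block header and inclusion proof, Gloas a slot and
    /// beacon block root), so the fork must be derived from the slot and
    /// passed in as decoding context.
    fn decode_column_at_slot<E: EthSpec>(
        bytes: &[u8],
        slot: Slot,
        spec: &ChainSpec,
    ) -> Result<DataColumnSidecar<E>, DecodeError> {
        // The same derivation `revert_ops` uses when it rebuilds a
        // `DeleteDataColumns` op from the columns being written.
        let fork_name = spec.fork_name_at_slot::<E>(slot);
        // `from_ssz_bytes_for_fork` selects the Gloas layout iff
        // `fork_name.gloas_enabled()`, otherwise decodes as Fulu.
        DataColumnSidecar::from_ssz_bytes_for_fork(bytes, fork_name)
    }

This is also why `StoreOp::DeleteDataColumns` gained a `ForkName` field: a rollback has to re-read the exact bytes it is about to delete, and the slot-derived fork is the only way to pick the right SSZ layout.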