70 changes: 53 additions & 17 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -1131,13 +1131,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.or_else(|| self.early_attester_cache.get_data_columns(block_root));

if let Some(mut all_cached_columns) = all_cached_columns_opt {
all_cached_columns.retain(|col| indices.contains(&col.index));
all_cached_columns.retain(|col| indices.contains(col.index()));
Ok(all_cached_columns)
} else {
} else if let Some(block) = self.get_blinded_block(&block_root)? {
indices
.iter()
.filter_map(|index| self.get_data_column(&block_root, index).transpose())
.filter_map(|index| {
self.get_data_column(&block_root, index, block.fork_name_unchecked())
.transpose()
})
.collect::<Result<_, _>>()
} else {
Ok(vec![])
}
}

@@ -1222,8 +1227,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn get_data_columns(
&self,
block_root: &Hash256,
fork_name: ForkName,
) -> Result<Option<DataColumnSidecarList<T::EthSpec>>, Error> {
self.store.get_data_columns(block_root).map_err(Error::from)
self.store
.get_data_columns(block_root, fork_name)
.map_err(Error::from)
}

/// Returns the blobs at the given root, if any.
@@ -1244,7 +1252,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
};

if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
if let Some(columns) = self.store.get_data_columns(block_root)? {
let fork_name = self.spec.fork_name_at_epoch(block.epoch());
if let Some(columns) = self.store.get_data_columns(block_root, fork_name)? {
let num_required_columns = T::EthSpec::number_of_columns() / 2;
let reconstruction_possible = columns.len() >= num_required_columns;
if reconstruction_possible {
@@ -1260,7 +1269,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(None)
}
} else {
self.get_blobs(block_root).map(|b| b.blobs())
Ok(self.get_blobs(block_root)?.blobs())
}
}
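A quick worked example for the reconstruction threshold above, assuming the mainnet preset where EthSpec::number_of_columns() is 128 (other presets may differ); the concrete numbers here are illustrative only:

// Sketch: reconstruction is attempted once at least half of the columns are stored.
let number_of_columns = 128usize;
let num_required_columns = number_of_columns / 2; // 64
let columns_held = 70usize; // hypothetical count of columns found in the store
let reconstruction_possible = columns_held >= num_required_columns; // true
assert!(reconstruction_possible);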

@@ -1272,8 +1281,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
block_root: &Hash256,
column_index: &ColumnIndex,
fork_name: ForkName,
) -> Result<Option<Arc<DataColumnSidecar<T::EthSpec>>>, Error> {
Ok(self.store.get_data_column(block_root, column_index)?)
Ok(self
.store
.get_data_column(block_root, column_index, fork_name)?)
}
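A hypothetical caller sketch (the wrapper function and its name are assumptions, not part of this diff): since the store lookups are now fork-aware, callers resolve the ForkName for the block's epoch before querying columns, as the reconstruction path above does with fork_name_at_epoch.

fn columns_for_epoch<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    block_root: Hash256,
    block_epoch: Epoch,
) -> Result<Option<DataColumnSidecarList<T::EthSpec>>, Error> {
    // Resolve the fork for the block's epoch, then do the fork-aware lookup.
    let fork_name = chain.spec.fork_name_at_epoch(block_epoch);
    chain.get_data_columns(&block_root, fork_name)
}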

pub fn get_blinded_block(
Expand Down Expand Up @@ -3183,7 +3195,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.cached_data_column_indexes(block_root)
.unwrap_or_default();
let new_data_columns =
data_columns_iter.filter(|b| !imported_data_columns.contains(&b.index));
data_columns_iter.filter(|b| !imported_data_columns.contains(b.index()));

for data_column in new_data_columns {
event_handler.register(EventKind::DataColumnSidecar(
@@ -3223,7 +3235,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data
// into the da_checker, where invalid = descendant of invalid blocks.
// Note: custody_columns should have at least one item and all items have the same parent root.
if let Some(parent_root) = custody_columns.iter().map(|c| c.block_parent_root()).next()
// TODO(gloas) ensure this check is no longer relevant post gloas
if let Some(parent_root) = custody_columns
.iter()
.filter_map(|c| match c.as_ref() {
DataColumnSidecar::Fulu(column) => Some(column.block_parent_root()),
_ => None,
})
.next()
&& !self
.canonical_head
.fork_choice_read_lock()
@@ -3543,8 +3562,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
if let Some(slasher) = self.slasher.as_ref() {
for data_colum in &data_columns {
slasher.accept_block_header(data_colum.signed_block_header());
for data_column in &data_columns {
// TODO(gloas) no block header post-gloas, what should we do here
if let DataColumnSidecar::Fulu(c) = data_column.as_data_column() {
slasher.accept_block_header(c.signed_block_header.clone());
}
}
}
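The same Fulu-only filtering recurs in the gossip and RPC custody-column paths below. A minimal sketch of the pattern as a standalone helper (the helper itself is hypothetical; only the enum match and field access are taken from this diff):

fn signed_header_if_fulu<E: EthSpec>(
    column: &DataColumnSidecar<E>,
) -> Option<SignedBeaconBlockHeader> {
    match column {
        // Only pre-Gloas (Fulu) sidecars carry a signed block header.
        DataColumnSidecar::Fulu(c) => Some(c.signed_block_header.clone()),
        _ => None,
    }
}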

@@ -3622,9 +3644,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.put_kzg_verified_blobs(block_root, blobs)?
}
EngineGetBlobsOutput::CustodyColumns(data_columns) => {
// TODO(gloas) verify that this check is no longer relevant for gloas
self.check_data_column_sidecar_header_signature_and_slashability(
block_root,
data_columns.iter().map(|c| c.as_data_column()),
data_columns
.iter()
.filter_map(|c| match c.as_data_column() {
DataColumnSidecar::Fulu(column) => Some(column),
_ => None,
}),
)?;
self.data_availability_checker
.put_kzg_verified_custody_data_columns(block_root, data_columns)?
@@ -3643,9 +3671,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
custody_columns: DataColumnSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
// TODO(gloas) ensure that this check is no longer relevant post gloas
self.check_data_column_sidecar_header_signature_and_slashability(
block_root,
custody_columns.iter().map(|c| c.as_ref()),
custody_columns.iter().filter_map(|c| match c.as_ref() {
DataColumnSidecar::Fulu(fulu) => Some(fulu),
_ => None,
}),
)?;

// This slot value is purely informative for the consumers of
@@ -3663,7 +3695,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
fn check_data_column_sidecar_header_signature_and_slashability<'a>(
self: &Arc<Self>,
block_root: Hash256,
custody_columns: impl IntoIterator<Item = &'a DataColumnSidecar<T::EthSpec>>,
custody_columns: impl IntoIterator<Item = &'a DataColumnSidecarFulu<T::EthSpec>>,
) -> Result<(), BlockError> {
let mut slashable_cache = self.observed_slashable.write();
// Process all unique block headers - previous logic assumed all headers were identical and
@@ -7366,7 +7398,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Supernodes need to persist all sampled custody columns
if columns_to_custody.len() != self.spec.number_of_custody_groups as usize {
data_columns
.retain(|data_column| columns_to_custody.contains(&data_column.index));
.retain(|data_column| columns_to_custody.contains(data_column.index()));
}
debug!(
%block_root,
@@ -7379,7 +7411,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}

/// Retrieves block roots (in ascending slot order) within some slot range from fork choice.
pub fn block_roots_from_fork_choice(&self, start_slot: u64, count: u64) -> Vec<Hash256> {
pub fn block_roots_from_fork_choice(
&self,
start_slot: u64,
count: u64,
) -> Vec<(Hash256, Slot)> {
let head_block_root = self.canonical_head.cached_head().head_block_root();
let fork_choice_read_lock = self.canonical_head.fork_choice_read_lock();
let block_roots_iter = fork_choice_read_lock
@@ -7390,7 +7426,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

for (root, slot) in block_roots_iter {
if slot < end_slot && slot >= start_slot {
roots.push(root);
roots.push((root, slot));
}
if slot < start_slot {
break;
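A hypothetical caller sketch for the new return type of block_roots_from_fork_choice (the wrapper function is an assumption): because each root now comes paired with its slot, downstream code can derive the fork for that slot without an extra block lookup.

fn roots_with_fork_names<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    start_slot: u64,
    count: u64,
) -> Vec<(Hash256, ForkName)> {
    chain
        .block_roots_from_fork_choice(start_slot, count)
        .into_iter()
        .map(|(root, slot)| {
            // Convert the slot to an epoch, then look up the fork for it.
            let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
            (root, chain.spec.fork_name_at_epoch(epoch))
        })
        .collect()
}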
38 changes: 26 additions & 12 deletions beacon_node/beacon_chain/src/builder.rs
@@ -9,7 +9,7 @@ use crate::data_availability_checker::DataAvailabilityChecker;
use crate::fork_choice_signal::ForkChoiceSignalTx;
use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin};
use crate::kzg_utils::build_data_column_sidecars;
use crate::kzg_utils::{build_data_column_sidecars_fulu, build_data_column_sidecars_gloas};
use crate::light_client_server_cache::LightClientServerCache;
use crate::migrate::{BackgroundMigrator, MigratorConfig};
use crate::observed_data_sidecars::ObservedDataSidecars;
@@ -42,6 +42,7 @@ use std::time::Duration;
use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
use task_executor::{ShutdownReason, TaskExecutor};
use tracing::{debug, error, info};
use tree_hash::TreeHash;
use types::data::CustodyIndex;
use types::{
BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList,
@@ -1213,17 +1214,30 @@ fn build_data_columns_from_blobs<E: EthSpec>(
.blob_kzg_commitments()
.cloned()
.map_err(|e| format!("Unexpected pre Deneb block: {e:?}"))?;
let kzg_commitments_inclusion_proof = beacon_block_body
.kzg_commitments_merkle_proof()
.map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?;
build_data_column_sidecars(
kzg_commitments,
kzg_commitments_inclusion_proof,
block.signed_block_header(),
blob_cells_and_proofs_vec,
spec,
)
.map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?

if block.fork_name_unchecked().gloas_enabled() {
build_data_column_sidecars_gloas(
kzg_commitments,
block.message().tree_hash_root(),
block.slot(),
blob_cells_and_proofs_vec,
spec,
)
.map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?
} else {
let kzg_commitments_inclusion_proof = beacon_block_body
.kzg_commitments_merkle_proof()
.map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?;

build_data_column_sidecars_fulu(
kzg_commitments,
kzg_commitments_inclusion_proof,
block.signed_block_header(),
blob_cells_and_proofs_vec,
spec,
)
.map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))?
}
};
Ok(data_columns)
}
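One clarifying note on the Gloas branch above (a general fact about block roots, not something introduced by this diff): the canonical block root is the tree hash root of the unsigned block message, which is why the Gloas builder is handed block.message().tree_hash_root() rather than a hash of the signed container. The sketch below assumes SignedBeaconBlock::canonical_root computes that same message root.

fn gloas_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
    // Tree hash of the unsigned message, as passed to build_data_column_sidecars_gloas above.
    let root = block.message().tree_hash_root();
    debug_assert_eq!(root, block.canonical_root());
    root
}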
33 changes: 21 additions & 12 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -19,10 +19,10 @@ use std::sync::Arc;
use std::time::Duration;
use task_executor::TaskExecutor;
use tracing::{debug, error, instrument};
use types::data::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
use types::data::{BlobIdentifier, FixedBlobSidecarList};
use types::{
BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch,
EthSpec, Hash256, SignedBeaconBlock, Slot,
BlobSidecar, BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar,
DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot,
};

mod error;
@@ -187,7 +187,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
self.availability_cache
.peek_pending_components(block_root, |components| {
components.is_some_and(|components| {
let cached_column_opt = components.get_cached_data_column(data_column.index);
let cached_column_opt = components.get_cached_data_column(*data_column.index());
cached_column_opt.is_some_and(|cached| *cached == *data_column)
})
})
@@ -877,7 +877,9 @@ mod test {
use std::time::Duration;
use store::HotColdDB;
use types::data::DataColumn;
use types::{ChainSpec, ColumnIndex, EthSpec, ForkName, MainnetEthSpec, Slot};
use types::{
ChainSpec, ColumnIndex, DataColumnSidecarFulu, EthSpec, ForkName, MainnetEthSpec, Slot,
};

type E = MainnetEthSpec;
type T = EphemeralHarnessType<E>;
@@ -932,7 +934,7 @@ mod test {
cgc_change_slot,
data_columns
.into_iter()
.filter(|d| requested_columns.contains(&d.index))
.filter(|d| requested_columns.contains(d.index()))
.collect(),
)
.expect("should put rpc custody columns");
@@ -1007,7 +1009,7 @@ mod test {
let requested_columns = &custody_columns[..10];
let gossip_columns = data_columns
.into_iter()
.filter(|d| requested_columns.contains(&d.index))
.filter(|d| requested_columns.contains(d.index()))
.map(GossipVerifiedDataColumn::<T>::__new_for_testing)
.collect::<Vec<_>>();
da_checker
@@ -1039,7 +1041,7 @@ mod test {

/// Regression test for KZG verification truncation bug (https://github.com/sigp/lighthouse/pull/7927)
#[test]
fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns() {
fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns_fulu() {
let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec()));
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
let da_checker = new_da_checker(spec.clone());
@@ -1065,10 +1067,17 @@ mod test {
data_columns
.into_iter()
.map(|d| {
let invalid_sidecar = DataColumnSidecar {
let invalid_sidecar = DataColumnSidecar::Fulu(DataColumnSidecarFulu {
column: DataColumn::<E>::empty(),
..d.as_ref().clone()
};
index: *d.index(),
kzg_commitments: d.kzg_commitments().clone(),
kzg_proofs: d.kzg_proofs().clone(),
signed_block_header: d.signed_block_header().unwrap().clone(),
kzg_commitments_inclusion_proof: d
.kzg_commitments_inclusion_proof()
.unwrap()
.clone(),
});
CustodyDataColumn::from_asserted_custody(Arc::new(invalid_sidecar))
})
.collect::<Vec<_>>()
@@ -1126,7 +1135,7 @@ mod test {
let custody_columns = custody_context.custody_columns_for_epoch(None, &spec);
let custody_columns = custody_columns
.iter()
.filter_map(|&col_idx| data_columns.iter().find(|d| d.index == col_idx).cloned())
.filter_map(|&col_idx| data_columns.iter().find(|d| *d.index() == col_idx).cloned())
.take(64)
.map(|d| {
KzgVerifiedCustodyDataColumn::from_asserted_custody(