@@ -843,19 +843,19 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         block_root: Hash256,
         publish_blobs: bool,
     ) {
+        let custody_columns = self.network_globals.sampling_columns.clone();
         let is_supernode = self.network_globals.is_supernode();
-
         let self_cloned = self.clone();
         let publish_fn = move |blobs_or_data_column| {
-            // At the moment non supernodes are not required to publish any columns.
-            // TODO(das): we could experiment with having full nodes publish their custodied
-            // columns here.
-            if publish_blobs && is_supernode {
+            if publish_blobs {
                 match blobs_or_data_column {
                     BlobsOrDataColumns::Blobs(blobs) => {
                         self_cloned.publish_blobs_gradually(blobs, block_root);
                     }
-                    BlobsOrDataColumns::DataColumns(columns) => {
+                    BlobsOrDataColumns::DataColumns(mut columns) => {
+                        if !is_supernode {
+                            columns.retain(|col| custody_columns.contains(&col.index));
+                        }
                         self_cloned.publish_data_columns_gradually(columns, block_root);
                     }
                 }
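The behavioural change in this hunk is that full nodes (non-supernodes) now publish the data columns they custody, instead of publishing nothing: the column list is trimmed down to the node's sampling/custody indices with `retain` before being handed to `publish_data_columns_gradually`. As a rough illustration of that filtering step, here is a minimal, self-contained sketch; `ColumnSidecar`, `retain_custody_columns`, and the index values are hypothetical stand-ins for Lighthouse's actual sidecar type and `sampling_columns` set.

```rust
// Minimal sketch (not Lighthouse code): keep only the columns this node custodies,
// mirroring the `retain` call introduced in the diff above.
use std::collections::HashSet;

#[derive(Debug)]
struct ColumnSidecar {
    index: u64,
    // sidecar payload omitted for brevity
}

fn retain_custody_columns(columns: &mut Vec<ColumnSidecar>, custody_columns: &HashSet<u64>) {
    // Drop every column whose index is outside the node's custody/sampling set.
    columns.retain(|col| custody_columns.contains(&col.index));
}

fn main() {
    let custody_columns: HashSet<u64> = [0, 7, 42].into_iter().collect();
    let mut columns: Vec<ColumnSidecar> = (0..64).map(|index| ColumnSidecar { index }).collect();

    retain_custody_columns(&mut columns, &custody_columns);

    // Only indices 0, 7 and 42 remain.
    println!("{:?}", columns.iter().map(|c| c.index).collect::<Vec<_>>());
}
```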
@@ -1055,7 +1055,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     ///
     /// This is an optimisation to reduce outbound bandwidth and ensures each column is published
     /// by some nodes on the network as soon as possible. Our hope is that some columns arrive from
-    /// other supernodes in the meantime, obviating the need for us to publish them. If no other
+    /// other nodes in the meantime, obviating the need for us to publish them. If no other
     /// publisher exists for a column, it will eventually get published here.
     fn publish_data_columns_gradually(
         self: &Arc<Self>,
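The doc comment describes a race against the rest of the network: publication is delayed in the hope that peers publish some columns first, and only the columns that have not shown up are sent. A hedged sketch of that idea follows; the `seen_on_gossip` set and `publish_missing_columns` helper are illustrative placeholders, not the observation cache the real implementation consults.

```rust
// Illustrative sketch: publish only the columns that no other node has published yet,
// so every column still ends up with at least one publisher.
use std::collections::HashSet;

fn publish_missing_columns(
    candidate_columns: Vec<u64>,
    seen_on_gossip: &HashSet<u64>,
    publish: impl Fn(u64),
) {
    for index in candidate_columns {
        if seen_on_gossip.contains(&index) {
            // Another node already published this column; skip it to save bandwidth.
            continue;
        }
        publish(index);
    }
}

fn main() {
    let seen: HashSet<u64> = [1, 3].into_iter().collect();
    publish_missing_columns(vec![0, 1, 2, 3], &seen, |index| {
        println!("publishing column {index}");
    });
}
```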
@@ -1080,9 +1080,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             });
         };

-        // If this node is a super node, permute the columns and split them into batches.
+        // Permute the columns and split them into batches.
         // The hope is that we won't need to publish some columns because we will receive them
-        // on gossip from other supernodes.
+        // on gossip from other nodes.
         data_columns_to_publish.shuffle(&mut rand::thread_rng());

         let blob_publication_batch_interval = chain.config.blob_publication_batch_interval;
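Taken together, the gradual publisher shuffles the columns so different nodes lead with different columns, then emits them in batches separated by `blob_publication_batch_interval`. The standalone sketch below mimics that pattern under stated assumptions: `BATCH_SIZE`, `BATCH_INTERVAL`, and `publish_batch` are illustrative placeholders rather than Lighthouse config or APIs, and the real code runs asynchronously instead of blocking a thread. It requires the `rand` crate.

```rust
// Rough sketch of shuffle-and-batch publication: randomise the column order, then
// publish fixed-size batches with a pause between them.
use rand::seq::SliceRandom;
use std::{thread, time::Duration};

fn publish_gradually(mut column_indices: Vec<u64>, publish_batch: impl Fn(&[u64])) {
    const BATCH_SIZE: usize = 4;
    const BATCH_INTERVAL: Duration = Duration::from_millis(300);

    // Randomise the order so different nodes publish different columns first,
    // spreading the publication work across the network.
    column_indices.shuffle(&mut rand::thread_rng());

    for batch in column_indices.chunks(BATCH_SIZE) {
        publish_batch(batch);
        // Pause between batches; in the real implementation, columns received from
        // peers in the meantime would be dropped from later batches.
        thread::sleep(BATCH_INTERVAL);
    }
}

fn main() {
    publish_gradually((0..16).collect(), |batch| println!("publishing {batch:?}"));
}
```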