Take the header size into account for the total block size #10804

base: master
```diff
@@ -240,7 +240,7 @@ parameter_types! {
 	pub const BlockHashCount: BlockNumber = 250;
 	pub const Version: RuntimeVersion = VERSION;
 	pub RuntimeBlockLength: BlockLength =
-		BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
+		BlockLength::max_with_normal_ratio(12 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
 	pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder()
 		.base_block(BlockExecutionWeight::get())
 		.for_class(DispatchClass::all(), |weights| {
```
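For orientation, here is a small standalone sketch (not part of the diff) of how `BlockLength::max_with_normal_ratio` splits the raised 12 MiB cap across dispatch classes. It assumes the commonly used `NORMAL_DISPATCH_RATIO` of 75%; the actual ratio is whatever this runtime defines:

```rust
fn main() {
    // `max_with_normal_ratio(max, ratio)` gives every dispatch class `max`
    // bytes, except Normal, which only gets `ratio * max`.
    let max: u32 = 12 * 1024 * 1024;
    let normal_ratio_percent: u32 = 75; // assumed value, runtime-specific
    let normal = max / 100 * normal_ratio_percent;
    println!("Operational/Mandatory limit: {max} bytes"); // 12 MiB
    println!("Normal limit: {normal} bytes"); // ~9 MiB with the assumed ratio
}
```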
```diff
@@ -60,7 +60,6 @@ use std::{
 /// Maximum blocks per response.
 pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128;
 
-const MAX_BODY_BYTES: usize = 8 * 1024 * 1024;
 const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
 
 mod rep {
@@ -442,11 +441,15 @@ where
 		};
 
 		let new_total_size = total_size +
+			block_data.header.len() +
 			block_data.body.iter().map(|ex| ex.len()).sum::<usize>() +
-			block_data.indexed_body.iter().map(|ex| ex.len()).sum::<usize>();
+			block_data.indexed_body.iter().map(|ex| ex.len()).sum::<usize>() +
+			block_data.justification.len() +
+			block_data.justifications.len();
```
Comment on lines +444 to +448

Member: Why do you have to go through these manually instead of something like …

Member (Author): Good question. Just kept it as it was before.
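The reviewer's truncated suggestion presumably points at deriving the size from the SCALE encoding instead of summing fields by hand. A minimal sketch of that alternative, assuming the response type implements `Encode` (as the network protocol types generally do); the helper name is hypothetical:

```rust
use codec::Encode;

/// Hypothetical replacement for the manual field-by-field summation:
/// `encoded_size()` computes the SCALE-encoded size without materializing
/// the encoded bytes. Caveat: it measures the encoding of the whole struct
/// (including length prefixes), not just the raw payloads, so the
/// accounting would shift slightly compared to the manual sum.
fn new_total_size<B: Encode>(total_size: usize, block_data: &B) -> usize {
    total_size + block_data.encoded_size()
}
```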
```diff
 
 		// Send at least one block, but make sure to not exceed the limit.
-		if !blocks.is_empty() && new_total_size > MAX_BODY_BYTES {
+		// Reserve 20 KiB for protocol overhead.
+		if !blocks.is_empty() && new_total_size > (MAX_RESPONSE_SIZE as usize - 20 * 1024) {
 			break
 		}
```
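To make the new budget concrete, a standalone sketch of the effective per-response cap. The 16 MiB value for `MAX_RESPONSE_SIZE` is an assumption for illustration; the real constant is defined by the networking layer:

```rust
fn main() {
    // Assumed value for illustration only; see the networking crate for
    // the actual MAX_RESPONSE_SIZE.
    const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024;
    // 20 KiB is reserved for protocol overhead, per the diff above.
    let budget = MAX_RESPONSE_SIZE as usize - 20 * 1024;
    println!("block data budget per response: {budget} bytes");
}
```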
```diff
@@ -1331,3 +1331,48 @@ async fn syncs_huge_blocks() {
 	assert_eq!(net.peer(0).client.info().best_number, 33);
 	assert_eq!(net.peer(1).client.info().best_number, 33);
 }
+
+/// Test syncing 512 blocks with ~1.2 MiB headers (empty bodies) to test large header handling.
```
Member: I see 900 KiB digests, where are the other 300 KiB coming from in the header?
```diff
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn syncs_blocks_with_large_headers() {
+	use sc_consensus::ForkChoiceStrategy;
+	use sp_runtime::{
+		generic::{BlockId, DigestItem},
+		Digest,
+	};
+
+	sp_tracing::try_init_simple();
+	let mut net = TestNet::new(2);
+
+	{
+		let peer = net.peer(0);
+		let best_hash = peer.client.info().best_hash;
+		peer.generate_blocks_at_with_inherent_digests(
+			BlockId::Hash(best_hash),
+			512,
+			BlockOrigin::Own,
+			|builder| builder.build().unwrap().block,
+			|i| {
+				let large_data = vec![i as u8; 900 * 1024];
+				Digest { logs: vec![DigestItem::PreRuntime(*b"test", large_data)] }
+			},
+			false,
+			true,
+			true,
+			ForkChoiceStrategy::LongestChain,
+		);
+		assert_eq!(peer.client.info().best_number, 512);
+	}
+
+	net.run_until_sync().await;
+
+	assert_eq!(net.peer(1).client.info().best_number, 512);
+	assert!(net.peers()[0].blockchain_canon_equals(&net.peers()[1]));
+
+	net.add_full_peer();
+
+	net.run_until_sync().await;
+
+	assert_eq!(net.peer(2).client.info().best_number, 512);
+	assert!(net.peers()[0].blockchain_canon_equals(&net.peers()[2]));
+}
```
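As a side note on the reviewer's size question above: the digest alone encodes to only slightly more than its 900 KiB payload, since SCALE adds just a small envelope (item tag, engine id, length prefixes). A quick standalone sketch, independent of the test harness, that measures it:

```rust
use codec::Encode;
use sp_runtime::{generic::DigestItem, Digest};

fn main() {
    // Same digest shape as the test: one PreRuntime item with 900 KiB of data.
    let large_data = vec![0u8; 900 * 1024];
    let digest = Digest { logs: vec![DigestItem::PreRuntime(*b"test", large_data)] };
    // Prints roughly 900 KiB, which is presumably why the reviewer asks
    // where the remaining ~300 KiB of the quoted ~1.2 MiB comes from.
    println!("encoded digest size: {} bytes", digest.encoded_size());
}
```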
```diff
@@ -997,6 +997,9 @@ pub mod pallet {
 	pub type BlockWeight<T: Config> = StorageValue<_, ConsumedWeight, ValueQuery>;
 
 	/// Total length (in bytes) for all extrinsics put together, for the current block.
+	///
+	/// In contrast to its name it also includes the header overhead and digest size to accurately
+	/// track block size.
 	#[pallet::storage]
 	#[pallet::whitelist_storage]
 	pub type AllExtrinsicsLen<T: Config> = StorageValue<_, u32>;
```
```diff
@@ -1929,6 +1932,28 @@ impl<T: Config> Pallet<T> {
 		// Remove previous block data from storage
 		BlockWeight::<T>::kill();
 
+		// Account for digest size and empty header overhead in block length.
+		// This ensures block limits consider the full block size, not just extrinsics.
+		let digest_size = digest.encoded_size();
+		let empty_header = <<T as Config>::Block as traits::Block>::Header::new(
+			*number,
+			Default::default(),
+			Default::default(),
+			*parent_hash,
+			Default::default(),
+		);
+		let empty_header_size = empty_header.encoded_size();
```
Comment on lines +1939 to +1946

Member: I guess we cannot hard-code an upper bound and have an integrity test for this encoded size, since it is generic over the runtime types? hm...

Member (Author): Yeah, not super perfect, but also not really solvable in a much better way.
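One way to recover part of the reviewer's idea anyway is to pin the bound per concrete runtime rather than in the generic pallet. (For scale: with the 12 MiB block length from the first diff, the 20% digest ceiling below works out to roughly 2.4 MiB, comfortably above the test's ~900 KiB digests.) A sketch of such a per-runtime integrity test; the 128-byte bound, module name, and `u32`/`BlakeTwo256` header types are assumptions for illustration:

```rust
#[cfg(test)]
mod header_size_integrity {
    use codec::Encode;
    use sp_runtime::{
        generic,
        traits::{BlakeTwo256, Header as HeaderT},
    };

    /// Pin an upper bound for the encoded size of an empty header, using the
    /// concrete types of one runtime. A real runtime would measure and pin
    /// its own value instead of this assumed 128 bytes.
    #[test]
    fn empty_header_size_is_bounded() {
        type Header = generic::Header<u32, BlakeTwo256>;
        const MAX_EMPTY_HEADER_SIZE: usize = 128;

        // Same construction as in `initialize` above: all roots and the
        // digest default to empty.
        let header = Header::new(
            0,
            Default::default(),
            Default::default(),
            Default::default(),
            Default::default(),
        );
        assert!(header.encoded_size() <= MAX_EMPTY_HEADER_SIZE);
    }
}
```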
```diff
+
+		let overhead = digest_size.saturating_add(empty_header_size) as u32;
+		AllExtrinsicsLen::<T>::put(overhead);
+
+		// Ensure inherent digests don't exceed 20% of the max block size.
+		let max_block_len = *T::BlockLength::get().max.get(DispatchClass::Mandatory);
+		let max_digest_len = max_block_len / 5;
+		assert!(
+			digest_size <= max_digest_len as usize,
+			"Inherent digest size ({digest_size}) exceeds 20% of max block length ({max_digest_len})"
+		);
 	}
 
 	/// Initialize [`INTRABLOCK_ENTROPY`](well_known_keys::INTRABLOCK_ENTROPY).
```
```diff
@@ -2041,6 +2066,9 @@ impl<T: Config> Pallet<T> {
 
 	/// Deposits a log and ensures it matches the block's log data.
 	pub fn deposit_log(item: generic::DigestItem) {
+		AllExtrinsicsLen::<T>::mutate(|len| {
+			*len = Some(len.unwrap_or(0).saturating_add(item.encoded_size() as u32));
+		});
 		<Digest<T>>::append(item);
 	}
```
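With this change, every digest item deposited during execution also counts toward the tracked block length, not just items known at initialization. A hedged usage sketch (the caller function and payload are hypothetical; only `deposit_log` itself is from the diff):

```rust
use sp_runtime::generic::DigestItem;

/// Hypothetical caller depositing a log from some pallet. After the diff
/// above, this appends to the digest *and* adds `item.encoded_size()` bytes
/// to `AllExtrinsicsLen`, so block-length checks see the digest growth.
fn deposit_custom_log<T: frame_system::Config>(payload: Vec<u8>) {
    frame_system::Pallet::<T>::deposit_log(DigestItem::Other(payload));
}
```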
Comment: Can we use MAX_HEAD_DATA_SIZE + 1 here instead of this hardcoded value?