-
Notifications
You must be signed in to change notification settings - Fork 4
feat(l1): add L1 reorg filtering via orphaned hashes and query-side e… #1095
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,13 @@ | ||
| -- Migration 019: Create orphaned_l1_hashes table and index | ||
|
|
||
| CREATE TABLE IF NOT EXISTS ${DB}.orphaned_l1_hashes ( | ||
| block_hash FixedString(32), | ||
| l1_block_number UInt64, | ||
| inserted_at DateTime64(3) DEFAULT now64() | ||
| ) ENGINE = MergeTree() | ||
| ORDER BY (l1_block_number, block_hash); | ||
|
|
||
| -- orphaned_l1_hashes: lookups by block_hash | ||
| ALTER TABLE ${DB}.orphaned_l1_hashes | ||
| ADD INDEX IF NOT EXISTS idx_orphaned_l1_block_hash_bf block_hash TYPE bloom_filter(0.01) GRANULARITY 1; | ||
| ALTER TABLE ${DB}.orphaned_l1_hashes MATERIALIZE INDEX idx_orphaned_l1_block_hash_bf; |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -32,6 +32,7 @@ pub struct ProcessorDriver { | |||||
| clickhouse_reader: Option<ClickhouseReader>, | ||||||
| extractor: Extractor, | ||||||
| reorg_detector: ReorgDetector, | ||||||
| l1_reorg_detector: ReorgDetector, | ||||||
| last_l2_header: Option<(u64, Address)>, | ||||||
| enable_db_writes: bool, | ||||||
| incident_client: IncidentClient, | ||||||
|
|
@@ -56,6 +57,7 @@ struct ProcessorComponents { | |||||
| clickhouse_reader: Option<ClickhouseReader>, | ||||||
| extractor: Extractor, | ||||||
| reorg_detector: ReorgDetector, | ||||||
| l1_reorg_detector: ReorgDetector, | ||||||
| last_l2_header: Option<(u64, Address)>, | ||||||
| enable_db_writes: bool, | ||||||
| processed_l2_headers: VecDeque<BlockHash>, | ||||||
|
|
@@ -133,6 +135,7 @@ impl ProcessorDriver { | |||||
|
|
||||||
| // Initialize reorg detector | ||||||
| let reorg_detector = ReorgDetector::new(); | ||||||
| let l1_reorg_detector = ReorgDetector::new(); | ||||||
|
|
||||||
| // init incident client and component IDs if monitors are enabled | ||||||
| let ( | ||||||
|
|
@@ -168,6 +171,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader, | ||||||
| extractor, | ||||||
| reorg_detector, | ||||||
| l1_reorg_detector, | ||||||
| last_l2_header: None, | ||||||
| enable_db_writes: opts.enable_db_writes, | ||||||
| incident_client, | ||||||
|
|
@@ -246,6 +250,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader, | ||||||
| extractor, | ||||||
| mut reorg_detector, | ||||||
| mut l1_reorg_detector, | ||||||
| mut last_l2_header, | ||||||
| enable_db_writes, | ||||||
| mut processed_l2_headers, | ||||||
|
|
@@ -258,6 +263,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader, | ||||||
| extractor, | ||||||
| &mut reorg_detector, | ||||||
| &mut l1_reorg_detector, | ||||||
| &mut last_l2_header, | ||||||
| enable_db_writes, | ||||||
| &mut processed_l2_headers, | ||||||
|
|
@@ -274,6 +280,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader: self.clickhouse_reader, | ||||||
| extractor: self.extractor, | ||||||
| reorg_detector: self.reorg_detector, | ||||||
| l1_reorg_detector: self.l1_reorg_detector, | ||||||
| last_l2_header: self.last_l2_header, | ||||||
| enable_db_writes: self.enable_db_writes, | ||||||
| processed_l2_headers: self.processed_l2_headers, | ||||||
|
|
@@ -362,6 +369,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader: Option<ClickhouseReader>, | ||||||
| extractor: Extractor, | ||||||
| reorg_detector: &mut ReorgDetector, | ||||||
| l1_reorg_detector: &mut ReorgDetector, | ||||||
| last_l2_header: &mut Option<(u64, Address)>, | ||||||
| enable_db_writes: bool, | ||||||
| processed_l2_headers: &mut VecDeque<BlockHash>, | ||||||
|
|
@@ -403,6 +411,7 @@ impl ProcessorDriver { | |||||
| &clickhouse_reader, | ||||||
| &extractor, | ||||||
| reorg_detector, | ||||||
| l1_reorg_detector, | ||||||
| last_l2_header, | ||||||
| enable_db_writes, | ||||||
| processed_l2_headers, | ||||||
|
|
@@ -437,6 +446,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader: &Option<ClickhouseReader>, | ||||||
| extractor: &Extractor, | ||||||
| reorg_detector: &mut ReorgDetector, | ||||||
| l1_reorg_detector: &mut ReorgDetector, | ||||||
| last_l2_header: &mut Option<(u64, Address)>, | ||||||
| enable_db_writes: bool, | ||||||
| processed_l2_headers: &mut VecDeque<BlockHash>, | ||||||
|
|
@@ -452,6 +462,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader, | ||||||
| extractor, | ||||||
| reorg_detector, | ||||||
| l1_reorg_detector, | ||||||
| last_l2_header, | ||||||
| enable_db_writes, | ||||||
| processed_l2_headers, | ||||||
|
|
@@ -506,6 +517,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader: &Option<ClickhouseReader>, | ||||||
| extractor: &Extractor, | ||||||
| reorg_detector: &mut ReorgDetector, | ||||||
| l1_reorg_detector: &mut ReorgDetector, | ||||||
| last_l2_header: &mut Option<(u64, Address)>, | ||||||
| enable_db_writes: bool, | ||||||
| processed_l2_headers: &mut VecDeque<BlockHash>, | ||||||
|
|
@@ -521,6 +533,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader, | ||||||
| extractor, | ||||||
| reorg_detector, | ||||||
| l1_reorg_detector, | ||||||
| last_l2_header, | ||||||
| processed_l2_headers, | ||||||
| kv_store, | ||||||
|
|
@@ -542,6 +555,7 @@ impl ProcessorDriver { | |||||
| clickhouse_reader: &Option<ClickhouseReader>, | ||||||
| extractor: &Extractor, | ||||||
| reorg_detector: &mut ReorgDetector, | ||||||
| _l1_reorg_detector: &mut ReorgDetector, | ||||||
| last_l2_header: &mut Option<(u64, Address)>, | ||||||
| processed_l2_headers: &mut VecDeque<BlockHash>, | ||||||
| kv_store: Option<&kv::Store>, | ||||||
|
|
@@ -601,7 +615,14 @@ impl ProcessorDriver { | |||||
| Self::handle_batches_verified_event(writer, extractor, wrapper.clone()).await | ||||||
| } | ||||||
| TaikoEvent::L1Header(header) => { | ||||||
| Self::handle_l1_header_event(writer, extractor, header.clone()).await | ||||||
| Self::handle_l1_header_event( | ||||||
| writer, | ||||||
| clickhouse_reader, | ||||||
| _l1_reorg_detector, | ||||||
| extractor, | ||||||
| header.clone(), | ||||||
| ) | ||||||
| .await | ||||||
| } | ||||||
| TaikoEvent::L2Header(header) => { | ||||||
| Self::handle_l2_header( | ||||||
|
|
@@ -849,9 +870,16 @@ impl ProcessorDriver { | |||||
| /// Handle `L1Header` event with database insertion and preconf data processing | ||||||
| async fn handle_l1_header_event( | ||||||
| writer: &ClickhouseWriter, | ||||||
| clickhouse_reader: &Option<ClickhouseReader>, | ||||||
| l1_reorg_detector: &mut ReorgDetector, | ||||||
| extractor: &Extractor, | ||||||
| header: primitives::headers::L1Header, | ||||||
| ) -> Result<()> { | ||||||
| // Detect L1 reorgs similar to L2 approach | ||||||
| let old_head = l1_reorg_detector.head_number(); | ||||||
| let reorg_result = | ||||||
| l1_reorg_detector.on_new_block_with_hash(header.number, B256::from(*header.hash)); | ||||||
|
|
||||||
| // Insert L1 header | ||||||
| Self::with_db_error_context( | ||||||
| writer.insert_l1_header(&header), | ||||||
|
|
@@ -865,6 +893,57 @@ impl ProcessorDriver { | |||||
| // Process preconfirmation data (same as original driver) | ||||||
| Self::process_preconf_data(writer, extractor, &header).await; | ||||||
|
|
||||||
| // If an L1 reorg is detected, record orphaned L1 block hashes for the reorged range | ||||||
| if let Some((depth, orphaned_hash)) = reorg_result { | ||||||
| // Handle one-block reorg orphaned hash | ||||||
| if let Some(hash) = orphaned_hash { | ||||||
| if let Err(e) = writer | ||||||
| .insert_orphaned_l1_hashes(&[(HashBytes::from(hash), header.number)]) | ||||||
| .await | ||||||
| { | ||||||
| tracing::error!( | ||||||
| block_number = header.number, | ||||||
| orphaned_hash = ?hash, | ||||||
| err = %e, | ||||||
| "Failed to insert orphaned L1 hash" | ||||||
| ); | ||||||
| } else { | ||||||
| info!(block_number = header.number, orphaned_hash = ?hash, "Inserted orphaned L1 hash"); | ||||||
|
||||||
| info!(block_number = header.number, orphaned_hash = ?hash, "Inserted orphaned L1 hash"); | |
| tracing::info!(block_number = header.number, orphaned_hash = ?hash, "Inserted orphaned L1 hash"); |
Copilot
AI
Aug 11, 2025
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The logic for calculating the orphaned block range may be incorrect. For a reorg where new_head < old_head, the orphaned blocks should span new_head + 1 through old_head; verify that the range built starting from new_head.saturating_add(1) actually covers that span and is not inverted or empty.
| let orphaned_start = new_head.saturating_add(1); | |
| let orphaned_start = new_head + 1; |
Copilot
AI
Aug 11, 2025
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Using info! for this logging is inconsistent with the error case above, which uses tracing::error!. Consider using tracing::info! for consistency.
| info!( | |
| tracing::info!( |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Direct string interpolation of user-provided block numbers into the SQL query creates a potential SQL injection vulnerability. Consider using parameterized queries or proper escaping.