@@ -344,6 +344,7 @@ impl CbfChainSource {
344344 retries,
345345 e,
346346 ) ;
347+ * restart_status. lock ( ) . unwrap ( ) = CbfRuntimeStatus :: Stopped ;
347348 break ;
348349 }
349350 log_error ! (
@@ -503,8 +504,8 @@ impl CbfChainSource {
503504
504505 /// Reset filter scan state to a clean baseline.
505506 ///
506- /// Called on both success and error paths in `run_filter_scan()` to ensure
507- /// no stale state leaks between scans.
507+ /// Called on error paths in `run_filter_scan()` to ensure no stale state
507+ /// leaks between scans. The success path performs inline cleanup instead.
508509 fn cleanup_scan_state ( & self ) {
509510 self . filter_skip_height . store ( 0 , Ordering :: Release ) ;
510511 self . watched_scripts . write ( ) . unwrap ( ) . clear ( ) ;
@@ -684,11 +685,20 @@ impl CbfChainSource {
684685 // created outputs and spent inputs), so we include every transaction
685686 // from matched blocks and let BDK determine relevance.
686687 let mut tx_update = TxUpdate :: default ( ) ;
688+ let per_request_timeout =
689+ Duration :: from_secs ( self . sync_config . timeouts_config . per_request_timeout_secs . into ( ) ) ;
687690 for ( height, block_hash) in & matched {
688- let indexed_block = requester. get_block ( * block_hash) . await . map_err ( |e| {
689- log_error ! ( self . logger, "Failed to fetch block {}: {:?}" , block_hash, e) ;
690- Error :: WalletOperationFailed
691- } ) ?;
691+ let indexed_block =
692+ tokio:: time:: timeout ( per_request_timeout, requester. get_block ( * block_hash) )
693+ . await
694+ . map_err ( |_| {
695+ log_error ! ( self . logger, "Timed out fetching block {}" , block_hash) ;
696+ Error :: WalletOperationFailed
697+ } ) ?
698+ . map_err ( |e| {
699+ log_error ! ( self . logger, "Failed to fetch block {}: {:?}" , block_hash, e) ;
700+ Error :: WalletOperationFailed
701+ } ) ?;
692702 let block = indexed_block. block ;
693703 let block_id = BlockId { height : * height, hash : block. header . block_hash ( ) } ;
694704 let conf_time =
@@ -797,12 +807,15 @@ impl CbfChainSource {
797807 // The compact block filter already matched our scripts (covering both
798808 // created outputs and spent inputs), so we confirm every transaction
799809 // from matched blocks and let LDK determine relevance.
810+ let per_request_timeout =
811+ Duration :: from_secs ( self . sync_config . timeouts_config . per_request_timeout_secs . into ( ) ) ;
800812 for ( height, block_hash) in & matched {
801813 confirm_block_transactions (
802814 & requester,
803815 * block_hash,
804816 * height,
805817 & confirmables,
818+ per_request_timeout,
806819 & self . logger ,
807820 )
808821 . await ?;
@@ -1181,12 +1194,18 @@ fn update_node_metrics_timestamp(
11811194/// Fetch a block by hash and call `transactions_confirmed` on each confirmable.
11821195async fn confirm_block_transactions (
11831196 requester : & Requester , block_hash : BlockHash , height : u32 ,
1184- confirmables : & [ & ( dyn Confirm + Sync + Send ) ] , logger : & Logger ,
1197+ confirmables : & [ & ( dyn Confirm + Sync + Send ) ] , per_request_timeout : Duration , logger : & Logger ,
11851198) -> Result < ( ) , Error > {
1186- let indexed_block = requester. get_block ( block_hash) . await . map_err ( |e| {
1187- log_error ! ( logger, "Failed to fetch block {}: {:?}" , block_hash, e) ;
1188- Error :: TxSyncFailed
1189- } ) ?;
1199+ let indexed_block = tokio:: time:: timeout ( per_request_timeout, requester. get_block ( block_hash) )
1200+ . await
1201+ . map_err ( |_| {
1202+ log_error ! ( logger, "Timed out fetching block {}" , block_hash) ;
1203+ Error :: TxSyncFailed
1204+ } ) ?
1205+ . map_err ( |e| {
1206+ log_error ! ( logger, "Failed to fetch block {}: {:?}" , block_hash, e) ;
1207+ Error :: TxSyncFailed
1208+ } ) ?;
11901209 let block = & indexed_block. block ;
11911210 let header = & block. header ;
11921211 let txdata: Vec < ( usize , & Transaction ) > = block. txdata . iter ( ) . enumerate ( ) . collect ( ) ;
0 commit comments