Commit 79190ad

DRY the pre-startup ChannelMonitorUpdate handling
This deduplicates the common `if during_startup { push background event } else { apply ChannelMonitorUpdate }` pattern by inlining it in `handle_new_monitor_update`.
1 parent 41f703c
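To illustrate the shape of the change, here is a minimal sketch of the pattern being consolidated. The `MonitorUpdate`, `BackgroundEvent`, and `Node` items below are hypothetical stand-ins, not LDK's actual definitions: before this commit each call site repeated the startup branch; after it, callers invoke one helper and the branch lives inside it, mirroring what `handle_new_monitor_update!` now does.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

// Hypothetical stand-ins for ChannelMonitorUpdate and
// BackgroundEvent::MonitorUpdateRegeneratedOnStartup.
struct MonitorUpdate;
struct BackgroundEvent(MonitorUpdate);

struct Node {
  background_events_processed_since_startup: AtomicBool,
  pending_background_events: Mutex<Vec<BackgroundEvent>>,
}

impl Node {
  fn apply_monitor_update(&self, _update: &MonitorUpdate) {
    // In LDK this is roughly chain_monitor.update_channel(funding_txo, update).
  }

  // The startup branch now lives in one place; callers invoke this
  // unconditionally, whether or not startup has finished.
  fn handle_new_monitor_update(&self, update: MonitorUpdate) {
    if self.background_events_processed_since_startup.load(Ordering::Acquire) {
      // Normal operation: hand the update to the chain monitor now.
      self.apply_monitor_update(&update);
    } else {
      // Still starting up: queue the update for replay as a background event.
      self.pending_background_events.lock().unwrap().push(BackgroundEvent(update));
    }
  }
}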

1 file changed: +44 -72 lines changed


lightning/src/ln/channelmanager.rs (+44 -72)
@@ -2973,31 +2973,9 @@ macro_rules! handle_error {
 /// [`ChannelMonitor`]/channel funding transaction) to begin with.
 macro_rules! locked_close_channel {
   ($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
-    if let Some((counterparty_node_id, funding_txo, channel_id, update)) = $shutdown_res_mut.monitor_update.take() {
-      if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-        handle_new_monitor_update!($self, funding_txo, update, $peer_state,
-          $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
-      } else {
-        // We want to track the in-flight update both in `in_flight_monitor_updates` and in
-        // `pending_background_events` to avoid a race condition during
-        // `pending_background_events` processing where we complete one
-        // `ChannelMonitorUpdate` (but there are more pending as background events) but we
-        // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-        // run post-completion actions. We could work around that with some effort, but its
-        // simpler to just track updates twice.
-        let in_flight_updates = $peer_state.in_flight_monitor_updates.entry(funding_txo)
-          .or_insert_with(Vec::new);
-        if !in_flight_updates.contains(&update) {
-          in_flight_updates.push(update.clone());
-        }
-        let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-          counterparty_node_id,
-          funding_txo,
-          channel_id,
-          update,
-        };
-        $self.pending_background_events.lock().unwrap().push(event);
-      }
+    if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
+      handle_new_monitor_update!($self, funding_txo, update, $peer_state,
+        $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
     }
     // If there's a possibility that we need to generate further monitor updates for this
     // channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3309,8 +3287,8 @@ macro_rules! handle_new_monitor_update {
   };
   (
     $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-    $chan_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer,
-    $completed: expr
+    $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
+    _internal_outer, $completed: expr
   ) => { {
     $in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
       .or_insert_with(Vec::new);
@@ -3322,31 +3300,55 @@ macro_rules! handle_new_monitor_update {
       $in_flight_updates.push($update);
       $in_flight_updates.len() - 1
     });
-    let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
-    handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+    if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
+      let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
+      handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+    } else {
+      // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+      // fail to persist it. This is a fairly safe assumption, however, since anything we do
+      // during the startup sequence should be replayed exactly if we immediately crash.
+      let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+        counterparty_node_id: $counterparty_node_id,
+        funding_txo: $funding_txo,
+        channel_id: $chan_id,
+        update: $in_flight_updates[$update_idx].clone(),
+      };
+      // We want to track the in-flight update both in `in_flight_monitor_updates` and in
+      // `pending_background_events` to avoid a race condition during
+      // `pending_background_events` processing where we complete one
+      // `ChannelMonitorUpdate` (but there are more pending as background events) but we
+      // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
+      // run post-completion actions.
+      // We could work around that with some effort, but its simpler to just track updates
+      // twice.
+      $self.pending_background_events.lock().unwrap().push(event);
+      false
+    }
   } };
   (
     $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
     REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
   ) => { {
     let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
     let chan_id = $chan_context.channel_id();
+    let counterparty_node_id = $chan_context.get_counterparty_node_id();
     let in_flight_updates;
     let idx;
     handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-      in_flight_updates, idx, _internal_outer,
+      counterparty_node_id, in_flight_updates, idx, _internal_outer,
       {
        let _ = in_flight_updates.remove(idx);
       })
   } };
   (
     $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
-    $per_peer_state_lock: expr, $logger: expr, $channel_id: expr, POST_CHANNEL_CLOSE
+    $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
   ) => { {
+    let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
     let in_flight_updates;
     let idx;
-    handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, $logger,
-      $channel_id, in_flight_updates, idx, _internal_outer,
+    handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
+      $channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
       {
        let _ = in_flight_updates.remove(idx);
        if in_flight_updates.is_empty() {
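The comment added in this hunk explains why a pre-startup update is recorded both in `in_flight_monitor_updates` and in `pending_background_events`. Below is a rough, hypothetical model of the completion check the double-tracking keeps honest (the names are illustrative, not the actual channelmanager.rs code): if pre-startup updates lived only in the background-event queue, the in-flight set could drain to empty while replays were still pending, and post-completion actions would run too early.

// Hypothetical model of why pre-startup updates are tracked twice.
#[derive(Default)]
struct PeerState {
  // Update ids still outstanding, including those only queued as background events.
  in_flight_monitor_updates: Vec<u64>,
}

#[derive(Default)]
struct Node {
  peer_state: PeerState,
  // Pre-startup updates queued for replay, by update id.
  pending_background_events: Vec<u64>,
}

impl Node {
  fn queue_pre_startup_update(&mut self, update_id: u64) {
    // Track the update in both places: as a background event for replay,
    // and in the in-flight set so completion checks stay accurate.
    self.pending_background_events.push(update_id);
    if !self.peer_state.in_flight_monitor_updates.contains(&update_id) {
      self.peer_state.in_flight_monitor_updates.push(update_id);
    }
  }

  fn on_update_persisted(&mut self, update_id: u64) -> bool {
    self.peer_state.in_flight_monitor_updates.retain(|&id| id != update_id);
    // Because queued-but-unreplayed updates are still present in
    // `in_flight_monitor_updates`, this check cannot report "all done"
    // while `pending_background_events` still holds work.
    self.peer_state.in_flight_monitor_updates.is_empty()
  }
}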
@@ -3366,10 +3368,11 @@ macro_rules! handle_new_monitor_update {
   ) => { {
     let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
     let chan_id = $chan.context.channel_id();
+    let counterparty_node_id = $chan.context.get_counterparty_node_id();
     let in_flight_updates;
     let idx;
     handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-      in_flight_updates, idx, _internal_outer,
+      counterparty_node_id, in_flight_updates, idx, _internal_outer,
       {
        let _ = in_flight_updates.remove(idx);
        if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -3968,11 +3971,10 @@ where
       },
       hash_map::Entry::Vacant(_) => {},
     }
-    let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
 
     handle_new_monitor_update!(
       self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
-      logger, channel_id, POST_CHANNEL_CLOSE
+      counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
     );
   }
 
@@ -7080,7 +7082,6 @@ where
       let peer_state = &mut **peer_state_lock;
       if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
         if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-          let counterparty_node_id = chan.context.get_counterparty_node_id();
           let logger = WithChannelContext::from(&self.logger, &chan.context, None);
           let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
 
@@ -7095,21 +7096,8 @@ where
             if let Some(raa_blocker) = raa_blocker_opt {
               peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
             }
-            if !during_init {
-              handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
-                peer_state, per_peer_state, chan);
-            } else {
-              // If we're running during init we cannot update a monitor directly -
-              // they probably haven't actually been loaded yet. Instead, push the
-              // monitor update as a background event.
-              self.pending_background_events.lock().unwrap().push(
-                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                  counterparty_node_id,
-                  funding_txo: prev_hop.funding_txo,
-                  channel_id: prev_hop.channel_id,
-                  update: monitor_update.clone(),
-                });
-            }
+            handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
+              peer_state, per_peer_state, chan);
           }
           UpdateFulfillCommitFetch::DuplicateClaim {} => {
             let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -7234,26 +7222,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
       peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new).push(action);
     }
 
-    if !during_init {
-      handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
-    } else {
-      // If we're running during init we cannot update a monitor directly - they probably
-      // haven't actually been loaded yet. Instead, push the monitor update as a background
-      // event.
-
-      let in_flight_updates = peer_state.in_flight_monitor_updates
-        .entry(prev_hop.funding_txo)
-        .or_insert_with(Vec::new);
-      in_flight_updates.push(preimage_update.clone());
-
-      let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-        counterparty_node_id,
-        funding_txo: prev_hop.funding_txo,
-        channel_id: prev_hop.channel_id,
-        update: preimage_update,
-      };
-      self.pending_background_events.lock().unwrap().push(event);
-    }
+    handle_new_monitor_update!(
+      self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
+      counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
+    );
   }
 
   fn finalize_claims(&self, sources: Vec<HTLCSource>) {
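Taken together, the call sites touched above all collapse to the same shape. A hypothetical before/after rendering of a claim-path caller, using stub types rather than the real ChannelManager:

struct MonitorUpdate;
struct ChannelManager { during_init: bool }

impl ChannelManager {
  fn handle_new_monitor_update(&self, _update: MonitorUpdate) {
    // Post-change: applies the update now, or queues it as a background
    // event if startup background events have not run yet.
  }

  // Before: every caller carried its own during_init branch.
  fn claim_path_before(&self, update: MonitorUpdate) {
    if !self.during_init {
      self.handle_new_monitor_update(update);
    } else {
      // ...hand-build a MonitorUpdateRegeneratedOnStartup background event...
    }
  }

  // After: the branch is gone from the caller entirely.
  fn claim_path_after(&self, update: MonitorUpdate) {
    self.handle_new_monitor_update(update);
  }
}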
