Skip to content

Commit 76b3a99

Browse files
committed
DRY the pre-startup ChannelMonitorUpdate handling
This removes duplication of the common `if during_startup { push background event } else { apply ChannelMonitorUpdate }` pattern by inlining it in `handle_new_monitor_update`. It also ensures we always insert `ChannelMonitorUpdate`s into the pending updates set when we push the background event, avoiding a race where we push an update as a background event, and then, while it is being processed, another update finishes and the post-update actions get run prematurely.
1 parent 8ae8d9f commit 76b3a99

File tree

1 file changed

+53
-75
lines changed

1 file changed

+53
-75
lines changed

lightning/src/ln/channelmanager.rs

+53-75
Original file line numberDiff line numberDiff line change
@@ -2952,31 +2952,9 @@ macro_rules! handle_error {
29522952
/// [`ChannelMonitor`]/channel funding transaction) to begin with.
29532953
macro_rules! locked_close_channel {
29542954
($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
2955-
if let Some((counterparty_node_id, funding_txo, channel_id, update)) = $shutdown_res_mut.monitor_update.take() {
2956-
if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
2957-
handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2958-
$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
2959-
} else {
2960-
// We want to track the in-flight update both in `in_flight_monitor_updates` and in
2961-
// `pending_background_events` to avoid a race condition during
2962-
// `pending_background_events` processing where we complete one
2963-
// `ChannelMonitorUpdate` (but there are more pending as background events) but we
2964-
// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
2965-
// run post-completion actions. We could work around that with some effort, but its
2966-
// simpler to just track updates twice.
2967-
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry(funding_txo)
2968-
.or_insert_with(Vec::new);
2969-
if !in_flight_updates.contains(&update) {
2970-
in_flight_updates.push(update.clone());
2971-
}
2972-
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
2973-
counterparty_node_id,
2974-
funding_txo,
2975-
channel_id,
2976-
update,
2977-
};
2978-
$self.pending_background_events.lock().unwrap().push(event);
2979-
}
2955+
if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
2956+
handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2957+
$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
29802958
}
29812959
// If there's a possibility that we need to generate further monitor updates for this
29822960
// channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3305,8 +3283,8 @@ macro_rules! handle_new_monitor_update {
33053283
};
33063284
(
33073285
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
3308-
$chan_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer,
3309-
$completed: expr
3286+
$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
3287+
_internal_outer, $completed: expr
33103288
) => { {
33113289
$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
33123290
.or_insert_with(Vec::new);
@@ -3318,31 +3296,55 @@ macro_rules! handle_new_monitor_update {
33183296
$in_flight_updates.push($update);
33193297
$in_flight_updates.len() - 1
33203298
});
3321-
let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3322-
handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3299+
if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
3300+
let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3301+
handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3302+
} else {
3303+
// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
3304+
// fail to persist it. This is a fairly safe assumption, however, since anything we do
3305+
// during the startup sequence should be replayed exactly if we immediately crash.
3306+
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
3307+
counterparty_node_id: $counterparty_node_id,
3308+
funding_txo: $funding_txo,
3309+
channel_id: $chan_id,
3310+
update: $in_flight_updates[$update_idx].clone(),
3311+
};
3312+
// We want to track the in-flight update both in `in_flight_monitor_updates` and in
3313+
// `pending_background_events` to avoid a race condition during
3314+
// `pending_background_events` processing where we complete one
3315+
// `ChannelMonitorUpdate` (but there are more pending as background events) but we
3316+
// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
3317+
// run post-completion actions.
3318+
// We could work around that with some effort, but its simpler to just track updates
3319+
// twice.
3320+
$self.pending_background_events.lock().unwrap().push(event);
3321+
false
3322+
}
33233323
} };
33243324
(
33253325
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
33263326
REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
33273327
) => { {
33283328
let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
33293329
let chan_id = $chan_context.channel_id();
3330+
let counterparty_node_id = $chan_context.get_counterparty_node_id();
33303331
let in_flight_updates;
33313332
let idx;
33323333
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3333-
in_flight_updates, idx, _internal_outer,
3334+
counterparty_node_id, in_flight_updates, idx, _internal_outer,
33343335
{
33353336
let _ = in_flight_updates.remove(idx);
33363337
})
33373338
} };
33383339
(
33393340
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3340-
$per_peer_state_lock: expr, $logger: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3341+
$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
33413342
) => { {
3343+
let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
33423344
let in_flight_updates;
33433345
let idx;
3344-
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, $logger,
3345-
$channel_id, in_flight_updates, idx, _internal_outer,
3346+
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
3347+
$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
33463348
{
33473349
let _ = in_flight_updates.remove(idx);
33483350
if in_flight_updates.is_empty() {
@@ -3362,10 +3364,11 @@ macro_rules! handle_new_monitor_update {
33623364
) => { {
33633365
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
33643366
let chan_id = $chan.context.channel_id();
3367+
let counterparty_node_id = $chan.context.get_counterparty_node_id();
33653368
let in_flight_updates;
33663369
let idx;
33673370
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3368-
in_flight_updates, idx, _internal_outer,
3371+
counterparty_node_id, in_flight_updates, idx, _internal_outer,
33693372
{
33703373
let _ = in_flight_updates.remove(idx);
33713374
if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -3995,11 +3998,10 @@ where
39953998
},
39963999
hash_map::Entry::Vacant(_) => {},
39974000
}
3998-
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
39994001

40004002
handle_new_monitor_update!(
40014003
self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
4002-
logger, channel_id, POST_CHANNEL_CLOSE
4004+
counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
40034005
);
40044006
}
40054007

@@ -7129,7 +7131,6 @@ where
71297131
let peer_state = &mut **peer_state_lock;
71307132
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
71317133
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
7132-
let counterparty_node_id = chan.context.get_counterparty_node_id();
71337134
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
71347135
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
71357136

@@ -7144,21 +7145,8 @@ where
71447145
if let Some(raa_blocker) = raa_blocker_opt {
71457146
peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
71467147
}
7147-
if !during_init {
7148-
handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7149-
peer_state, per_peer_state, chan);
7150-
} else {
7151-
// If we're running during init we cannot update a monitor directly -
7152-
// they probably haven't actually been loaded yet. Instead, push the
7153-
// monitor update as a background event.
7154-
self.pending_background_events.lock().unwrap().push(
7155-
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
7156-
counterparty_node_id,
7157-
funding_txo: prev_hop.funding_txo,
7158-
channel_id: prev_hop.channel_id,
7159-
update: monitor_update.clone(),
7160-
});
7161-
}
7148+
handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7149+
peer_state, per_peer_state, chan);
71627150
}
71637151
UpdateFulfillCommitFetch::DuplicateClaim {} => {
71647152
let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -7273,26 +7261,10 @@ where
72737261
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
72747262
}
72757263

7276-
if !during_init {
7277-
handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
7278-
} else {
7279-
// If we're running during init we cannot update a monitor directly - they probably
7280-
// haven't actually been loaded yet. Instead, push the monitor update as a background
7281-
// event.
7282-
7283-
let in_flight_updates = peer_state.in_flight_monitor_updates
7284-
.entry(prev_hop.funding_txo)
7285-
.or_insert_with(Vec::new);
7286-
in_flight_updates.push(preimage_update.clone());
7287-
7288-
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
7289-
counterparty_node_id,
7290-
funding_txo: prev_hop.funding_txo,
7291-
channel_id: prev_hop.channel_id,
7292-
update: preimage_update,
7293-
};
7294-
self.pending_background_events.lock().unwrap().push(event);
7295-
}
7264+
handle_new_monitor_update!(
7265+
self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
7266+
counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
7267+
);
72967268
}
72977269

72987270
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -13685,14 +13657,20 @@ where
1368513657
}
1368613658
}
1368713659
}
13660+
let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13661+
.expect("If we have pending updates for a channel it has to have an entry")
13662+
.lock().unwrap();
1368813663
if updated_id {
13689-
per_peer_state.get(counterparty_node_id)
13690-
.expect("If we have pending updates for a channel it has to have an entry")
13691-
.lock().unwrap()
13664+
per_peer_state
1369213665
.closed_channel_monitor_update_ids.entry(*channel_id)
1369313666
.and_modify(|v| *v = cmp::max(update.update_id, *v))
1369413667
.or_insert(update.update_id);
1369513668
}
13669+
let in_flight_updates = per_peer_state.in_flight_monitor_updates
13670+
.entry(*funding_txo)
13671+
.or_insert_with(Vec::new);
13672+
debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13673+
in_flight_updates.push(update.clone());
1369613674
}
1369713675
pending_background_events.push(new_event);
1369813676
}

0 commit comments

Comments
 (0)