@@ -2952,31 +2952,9 @@ macro_rules! handle_error {
2952
2952
/// [`ChannelMonitor`]/channel funding transaction) to begin with.
2953
2953
macro_rules! locked_close_channel {
2954
2954
($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
2955
- if let Some((counterparty_node_id, funding_txo, channel_id, update)) = $shutdown_res_mut.monitor_update.take() {
2956
- if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
2957
- handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2958
- $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
2959
- } else {
2960
- // We want to track the in-flight update both in `in_flight_monitor_updates` and in
2961
- // `pending_background_events` to avoid a race condition during
2962
- // `pending_background_events` processing where we complete one
2963
- // `ChannelMonitorUpdate` (but there are more pending as background events) but we
2964
- // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
2965
- // run post-completion actions. We could work around that with some effort, but its
2966
- // simpler to just track updates twice.
2967
- let in_flight_updates = $peer_state.in_flight_monitor_updates.entry(funding_txo)
2968
- .or_insert_with(Vec::new);
2969
- if !in_flight_updates.contains(&update) {
2970
- in_flight_updates.push(update.clone());
2971
- }
2972
- let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
2973
- counterparty_node_id,
2974
- funding_txo,
2975
- channel_id,
2976
- update,
2977
- };
2978
- $self.pending_background_events.lock().unwrap().push(event);
2979
- }
2955
+ if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
2956
+ handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2957
+ $channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
2980
2958
}
2981
2959
// If there's a possibility that we need to generate further monitor updates for this
2982
2960
// channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3305,8 +3283,8 @@ macro_rules! handle_new_monitor_update {
3305
3283
};
3306
3284
(
3307
3285
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
3308
- $chan_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer,
3309
- $completed: expr
3286
+ $chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
3287
+ _internal_outer, $completed: expr
3310
3288
) => { {
3311
3289
$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
3312
3290
.or_insert_with(Vec::new);
@@ -3318,31 +3296,55 @@ macro_rules! handle_new_monitor_update {
3318
3296
$in_flight_updates.push($update);
3319
3297
$in_flight_updates.len() - 1
3320
3298
});
3321
- let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3322
- handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3299
+ if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
3300
+ let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3301
+ handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3302
+ } else {
3303
+ // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
3304
+ // fail to persist it. This is a fairly safe assumption, however, since anything we do
3305
+ // during the startup sequence should be replayed exactly if we immediately crash.
3306
+ let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
3307
+ counterparty_node_id: $counterparty_node_id,
3308
+ funding_txo: $funding_txo,
3309
+ channel_id: $chan_id,
3310
+ update: $in_flight_updates[$update_idx].clone(),
3311
+ };
3312
+ // We want to track the in-flight update both in `in_flight_monitor_updates` and in
3313
+ // `pending_background_events` to avoid a race condition during
3314
+ // `pending_background_events` processing where we complete one
3315
+ // `ChannelMonitorUpdate` (but there are more pending as background events) but we
3316
+ // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
3317
+ // run post-completion actions.
3318
+ // We could work around that with some effort, but its simpler to just track updates
3319
+ // twice.
3320
+ $self.pending_background_events.lock().unwrap().push(event);
3321
+ false
3322
+ }
3323
3323
} };
3324
3324
(
3325
3325
$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
3326
3326
REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
3327
3327
) => { {
3328
3328
let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
3329
3329
let chan_id = $chan_context.channel_id();
3330
+ let counterparty_node_id = $chan_context.get_counterparty_node_id();
3330
3331
let in_flight_updates;
3331
3332
let idx;
3332
3333
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3333
- in_flight_updates, idx, _internal_outer,
3334
+ counterparty_node_id, in_flight_updates, idx, _internal_outer,
3334
3335
{
3335
3336
let _ = in_flight_updates.remove(idx);
3336
3337
})
3337
3338
} };
3338
3339
(
3339
3340
$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3340
- $per_peer_state_lock: expr, $logger: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3341
+ $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3341
3342
) => { {
3343
+ let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
3342
3344
let in_flight_updates;
3343
3345
let idx;
3344
- handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, $logger,
3345
- $channel_id, in_flight_updates, idx, _internal_outer,
3346
+ handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
3347
+ $channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
3346
3348
{
3347
3349
let _ = in_flight_updates.remove(idx);
3348
3350
if in_flight_updates.is_empty() {
@@ -3362,10 +3364,11 @@ macro_rules! handle_new_monitor_update {
3362
3364
) => { {
3363
3365
let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3364
3366
let chan_id = $chan.context.channel_id();
3367
+ let counterparty_node_id = $chan.context.get_counterparty_node_id();
3365
3368
let in_flight_updates;
3366
3369
let idx;
3367
3370
handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3368
- in_flight_updates, idx, _internal_outer,
3371
+ counterparty_node_id, in_flight_updates, idx, _internal_outer,
3369
3372
{
3370
3373
let _ = in_flight_updates.remove(idx);
3371
3374
if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -3962,11 +3965,10 @@ where
3962
3965
},
3963
3966
hash_map::Entry::Vacant(_) => {},
3964
3967
}
3965
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
3966
3968
3967
3969
handle_new_monitor_update!(
3968
3970
self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
3969
- logger, channel_id, POST_CHANNEL_CLOSE
3971
+ counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
3970
3972
);
3971
3973
}
3972
3974
@@ -7096,7 +7098,6 @@ where
7096
7098
let peer_state = &mut **peer_state_lock;
7097
7099
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
7098
7100
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
7099
- let counterparty_node_id = chan.context.get_counterparty_node_id();
7100
7101
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
7101
7102
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
7102
7103
@@ -7111,21 +7112,8 @@ where
7111
7112
if let Some(raa_blocker) = raa_blocker_opt {
7112
7113
peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
7113
7114
}
7114
- if !during_init {
7115
- handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7116
- peer_state, per_peer_state, chan);
7117
- } else {
7118
- // If we're running during init we cannot update a monitor directly -
7119
- // they probably haven't actually been loaded yet. Instead, push the
7120
- // monitor update as a background event.
7121
- self.pending_background_events.lock().unwrap().push(
7122
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
7123
- counterparty_node_id,
7124
- funding_txo: prev_hop.funding_txo,
7125
- channel_id: prev_hop.channel_id,
7126
- update: monitor_update.clone(),
7127
- });
7128
- }
7115
+ handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7116
+ peer_state, per_peer_state, chan);
7129
7117
}
7130
7118
UpdateFulfillCommitFetch::DuplicateClaim {} => {
7131
7119
let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -7250,26 +7238,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
7250
7238
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
7251
7239
}
7252
7240
7253
- if !during_init {
7254
- handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
7255
- } else {
7256
- // If we're running during init we cannot update a monitor directly - they probably
7257
- // haven't actually been loaded yet. Instead, push the monitor update as a background
7258
- // event.
7259
-
7260
- let in_flight_updates = peer_state.in_flight_monitor_updates
7261
- .entry(prev_hop.funding_txo)
7262
- .or_insert_with(Vec::new);
7263
- in_flight_updates.push(preimage_update.clone());
7264
-
7265
- let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
7266
- counterparty_node_id,
7267
- funding_txo: prev_hop.funding_txo,
7268
- channel_id: prev_hop.channel_id,
7269
- update: preimage_update,
7270
- };
7271
- self.pending_background_events.lock().unwrap().push(event);
7272
- }
7241
+ handle_new_monitor_update!(
7242
+ self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
7243
+ counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
7244
+ );
7273
7245
}
7274
7246
7275
7247
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -13662,14 +13634,20 @@ where
13662
13634
}
13663
13635
}
13664
13636
}
13637
+ let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13638
+ .expect("If we have pending updates for a channel it must have an entry")
13639
+ .lock().unwrap();
13665
13640
if updated_id {
13666
- per_peer_state.get(counterparty_node_id)
13667
- .expect("If we have pending updates for a channel it must have an entry")
13668
- .lock().unwrap()
13641
+ per_peer_state
13669
13642
.closed_channel_monitor_update_ids.entry(*channel_id)
13670
13643
.and_modify(|v| *v = cmp::max(update.update_id, *v))
13671
13644
.or_insert(update.update_id);
13672
13645
}
13646
+ let in_flight_updates = per_peer_state.in_flight_monitor_updates
13647
+ .entry(*funding_txo)
13648
+ .or_insert_with(Vec::new);
13649
+ debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13650
+ in_flight_updates.push(update.clone());
13673
13651
}
13674
13652
pending_background_events.push(new_event);
13675
13653
}
0 commit comments