@@ -2954,31 +2954,9 @@ macro_rules! handle_error {
/// [`ChannelMonitor`]/channel funding transaction) to begin with.
macro_rules! locked_close_channel {
	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
-		if let Some((counterparty_node_id, funding_txo, channel_id, update)) = $shutdown_res_mut.monitor_update.take() {
-			if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-				handle_new_monitor_update!($self, funding_txo, update, $peer_state,
-					$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
-			} else {
-				// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-				// `pending_background_events` to avoid a race condition during
-				// `pending_background_events` processing where we complete one
-				// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-				// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
-				// run post-completion actions. We could work around that with some effort, but its
-				// simpler to just track updates twice.
-				let in_flight_updates = $peer_state.in_flight_monitor_updates.entry(funding_txo)
-					.or_insert_with(Vec::new);
-				if !in_flight_updates.contains(&update) {
-					in_flight_updates.push(update.clone());
-				}
-				let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-					counterparty_node_id,
-					funding_txo,
-					channel_id,
-					update,
-				};
-				$self.pending_background_events.lock().unwrap().push(event);
-			}
+		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
+			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
+				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
		}
		// If there's a possibility that we need to generate further monitor updates for this
		// channel, we need to store the last update_id of it. However, we don't want to insert
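Note: the startup-aware branching removed here is not dropped; it moves into the `_internal_outer` arm of `handle_new_monitor_update!` (see the hunks below). A minimal standalone sketch of the underlying apply-now-or-queue pattern follows; all names (`Manager`, `MonitorUpdate`, `handle_update`, `persist`) are hypothetical illustrations, not LDK API:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

#[derive(Clone, PartialEq)]
struct MonitorUpdate { update_id: u64 }

struct Manager {
	// Flips to true once startup background events have been replayed.
	startup_complete: AtomicBool,
	// Updates handed off (or deferred) but not yet confirmed complete.
	in_flight: Mutex<Vec<MonitorUpdate>>,
	// Updates deferred until after startup, replayed by background processing.
	background_queue: Mutex<Vec<MonitorUpdate>>,
}

impl Manager {
	fn handle_update(&self, update: MonitorUpdate) {
		{
			// Track the update as in-flight in both cases, mirroring the
			// "track updates twice" reasoning in the diff above.
			let mut in_flight = self.in_flight.lock().unwrap();
			if !in_flight.contains(&update) {
				in_flight.push(update.clone());
			}
		}
		if self.startup_complete.load(Ordering::Acquire) {
			// Normal path: hand the update to persistence immediately.
			self.persist(&update);
		} else {
			// Startup path: monitors may not be loaded yet, so queue the
			// update to be applied once background processing runs.
			self.background_queue.lock().unwrap().push(update);
		}
	}

	fn persist(&self, _update: &MonitorUpdate) { /* persistence elided */ }
}

fn main() {
	let mgr = Manager {
		startup_complete: AtomicBool::new(false),
		in_flight: Mutex::new(Vec::new()),
		background_queue: Mutex::new(Vec::new()),
	};
	// During startup the update is queued rather than persisted immediately.
	mgr.handle_update(MonitorUpdate { update_id: 1 });
	assert_eq!(mgr.background_queue.lock().unwrap().len(), 1);
}
```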
@@ -3307,8 +3285,8 @@ macro_rules! handle_new_monitor_update {
	};
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
-		$chan_id: expr, $in_flight_updates: ident, $update_idx: ident, _internal_outer,
-		$completed: expr
+		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
+		_internal_outer, $completed: expr
	) => { {
		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
			.or_insert_with(Vec::new);
@@ -3320,31 +3298,55 @@ macro_rules! handle_new_monitor_update {
				$in_flight_updates.push($update);
				$in_flight_updates.len() - 1
			});
-		let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
-		handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
+			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
+			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+		} else {
+			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+			// fail to persist it. This is a fairly safe assumption, however, since anything we do
+			// during the startup sequence should be replayed exactly if we immediately crash.
+			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+				counterparty_node_id: $counterparty_node_id,
+				funding_txo: $funding_txo,
+				channel_id: $chan_id,
+				update: $in_flight_updates[$update_idx].clone(),
+			};
+			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+			// `pending_background_events` to avoid a race condition during
+			// `pending_background_events` processing where we complete one
+			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+			// conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to
+			// run post-completion actions.
+			// We could work around that with some effort, but its simpler to just track updates
+			// twice.
+			$self.pending_background_events.lock().unwrap().push(event);
+			false
+		}
	} };
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
		let chan_id = $chan_context.channel_id();
+		let counterparty_node_id = $chan_context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			in_flight_updates, idx, _internal_outer,
+			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
			})
	} };
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
-		$per_peer_state_lock: expr, $logger: expr, $channel_id: expr, POST_CHANNEL_CLOSE
+		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
	) => { {
+		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
		let in_flight_updates;
		let idx;
-		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, $logger,
-			$channel_id, in_flight_updates, idx, _internal_outer,
+		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
+			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() {
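The comment block added above explains why a deferred update is tracked both in `in_flight_monitor_updates` and in `pending_background_events`. A toy model of that accounting (hypothetical names, not LDK code), showing that mirroring the queued update into the in-flight list keeps the completion check from firing while background events are still pending:

```rust
use std::collections::HashMap;

type ChannelKey = u64;

#[derive(Default)]
struct Tracking {
	// update_ids handed to the persister or deferred during startup
	in_flight: HashMap<ChannelKey, Vec<u64>>,
	// updates queued for replay once startup background processing runs
	background: Vec<(ChannelKey, u64)>,
}

impl Tracking {
	fn defer_during_startup(&mut self, chan: ChannelKey, update_id: u64) {
		// Track twice: once for completion accounting, once for replay.
		self.in_flight.entry(chan).or_default().push(update_id);
		self.background.push((chan, update_id));
	}

	fn complete(&mut self, chan: ChannelKey, update_id: u64) {
		if let Some(list) = self.in_flight.get_mut(&chan) {
			list.retain(|id| *id != update_id);
		}
	}

	fn safe_to_run_post_completion_actions(&self, chan: ChannelKey) -> bool {
		self.in_flight.get(&chan).map_or(true, |list| list.is_empty())
	}
}

fn main() {
	let mut t = Tracking::default();
	t.defer_during_startup(1, 7);
	t.defer_during_startup(1, 8);
	// Update 7 completes during background-event processing, but update 8 is
	// still queued; because it was also mirrored into `in_flight`, the check
	// below correctly refuses to run post-completion actions yet.
	t.complete(1, 7);
	assert!(!t.safe_to_run_post_completion_actions(1));
	t.complete(1, 8);
	assert!(t.safe_to_run_post_completion_actions(1));
}
```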
@@ -3364,10 +3366,11 @@ macro_rules! handle_new_monitor_update {
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		let chan_id = $chan.context.channel_id();
+		let counterparty_node_id = $chan.context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			in_flight_updates, idx, _internal_outer,
+			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -3997,11 +4000,10 @@ where
			},
			hash_map::Entry::Vacant(_) => {},
		}
-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);

		handle_new_monitor_update!(
			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
-			logger, channel_id, POST_CHANNEL_CLOSE
+			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
		);
	}

@@ -7188,7 +7190,6 @@ where
			let peer_state = &mut **peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-					let counterparty_node_id = chan.context.get_counterparty_node_id();
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);

@@ -7203,21 +7204,8 @@ where
							if let Some(raa_blocker) = raa_blocker_opt {
								peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
							}
-							if !during_init {
-								handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
-									peer_state, per_peer_state, chan);
-							} else {
-								// If we're running during init we cannot update a monitor directly -
-								// they probably haven't actually been loaded yet. Instead, push the
-								// monitor update as a background event.
-								self.pending_background_events.lock().unwrap().push(
-									BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-										counterparty_node_id,
-										funding_txo: prev_hop.funding_txo,
-										channel_id: prev_hop.channel_id,
-										update: monitor_update.clone(),
-									});
-							}
+							handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
+								peer_state, per_peer_state, chan);
						}
						UpdateFulfillCommitFetch::DuplicateClaim {} => {
							let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -7332,26 +7320,10 @@ where
			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
		}

-		if !during_init {
-			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
-		} else {
-			// If we're running during init we cannot update a monitor directly - they probably
-			// haven't actually been loaded yet. Instead, push the monitor update as a background
-			// event.
-
-			let in_flight_updates = peer_state.in_flight_monitor_updates
-				.entry(prev_hop.funding_txo)
-				.or_insert_with(Vec::new);
-			in_flight_updates.push(preimage_update.clone());
-
-			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-				counterparty_node_id,
-				funding_txo: prev_hop.funding_txo,
-				channel_id: prev_hop.channel_id,
-				update: preimage_update,
-			};
-			self.pending_background_events.lock().unwrap().push(event);
-		}
+		handle_new_monitor_update!(
+			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
+			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
+		);
	}

	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -13743,14 +13715,20 @@ where
					}
				}
			}
+			let mut per_peer_state = per_peer_state.get(counterparty_node_id)
+				.expect("If we have pending updates for a channel it has to have an entry")
+				.lock().unwrap();
			if updated_id {
-				per_peer_state.get(counterparty_node_id)
-					.expect("If we have pending updates for a channel it has to have an entry")
-					.lock().unwrap()
+				per_peer_state
					.closed_channel_monitor_update_ids.entry(*channel_id)
					.and_modify(|v| *v = cmp::max(update.update_id, *v))
					.or_insert(update.update_id);
			}
+			let in_flight_updates = per_peer_state.in_flight_monitor_updates
+				.entry(*funding_txo)
+				.or_insert_with(Vec::new);
+			debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
+			in_flight_updates.push(update.clone());
		}
		pending_background_events.push(new_event);
	}
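The final hunk mirrors the same invariant on the deserialization path: a monitor update regenerated as a startup background event is also recorded as in-flight, exactly once per funding outpoint. A small standalone sketch of that bookkeeping; the types below are simplified stand-ins, not the real `OutPoint`/`ChannelMonitorUpdate`:

```rust
use std::collections::HashMap;

// Simplified stand-ins for the real funding outpoint and monitor update types.
#[derive(Clone, PartialEq, Eq, Hash)]
struct FundingOutpoint { txid: [u8; 32], index: u16 }

#[derive(Clone, PartialEq)]
struct MonitorUpdate { update_id: u64 }

fn record_in_flight(
	in_flight: &mut HashMap<FundingOutpoint, Vec<MonitorUpdate>>,
	funding: FundingOutpoint,
	update: &MonitorUpdate,
) {
	let updates = in_flight.entry(funding).or_insert_with(Vec::new);
	// Replaying the same pending update twice would double-count it, so assert
	// uniqueness (mirroring the debug_assert in the hunk above) before pushing.
	debug_assert!(!updates.iter().any(|upd| upd == update));
	updates.push(update.clone());
}

fn main() {
	let mut in_flight = HashMap::new();
	let funding = FundingOutpoint { txid: [0u8; 32], index: 0 };
	let update = MonitorUpdate { update_id: 42 };
	record_in_flight(&mut in_flight, funding, &update);
	assert_eq!(in_flight.len(), 1);
}
```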