
Commit 938d2a2

Avoid startup PeerState entries for peers with unfunded channels
If a peer creates a channel with us which never reaches the funding stage (or never gets any commitment updates after creation), we'll avoid inserting the `update_id` into `closed_channel_monitor_update_ids` at runtime, to avoid keeping a `PeerState` entry around for no reason.

However, on startup we still create a `ChannelMonitorUpdate` with a `ChannelForceClosed` update step to ensure the `ChannelMonitor` is locked and shut down. This is pretty redundant, and results in a bunch of on-startup `ChannelMonitorUpdate`s for any old but non-archived `ChannelMonitor`s.

Instead, here, we check whether a `ChannelMonitor` already saw a `ChannelForceClosed` update step before we generate the on-startup `ChannelMonitorUpdate`. This also allows us to skip the `closed_channel_monitor_update_ids` insertion, as we can be confident we'll never have a `ChannelMonitorUpdate` for this channel at all.
1 parent 00087f1 commit 938d2a2
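
To make the change concrete, here is a minimal, self-contained sketch of the startup path this commit touches. The names `MonitorStub`, `ForceCloseUpdate`, `startup_force_close_updates`, and `channels_in_manager` are hypothetical stand-ins rather than LDK's actual types; only `offchain_closed()` (reading `lockdown_from_offchain`) mirrors the accessor added in channelmonitor.rs, and the early `continue` mirrors the new check in `ChannelManager` deserialization.

use std::collections::HashSet;

/// Hypothetical stand-in for a deserialized `ChannelMonitor` (not LDK's type).
struct MonitorStub {
    channel_id: [u8; 32],
    /// Mirrors the monitor's `lockdown_from_offchain` flag.
    lockdown_from_offchain: bool,
}

impl MonitorStub {
    /// Analogue of the `offchain_closed()` accessor added by this commit.
    fn offchain_closed(&self) -> bool {
        self.lockdown_from_offchain
    }
}

/// Stand-in for an on-startup `ChannelMonitorUpdate` carrying a `ChannelForceClosed` step.
struct ForceCloseUpdate {
    channel_id: [u8; 32],
}

/// Simplified model of the deserialization loop: only monitors whose channel is
/// missing from the manager *and* which never saw a `ChannelForceClosed` update
/// get a fresh on-startup update queued.
fn startup_force_close_updates(
    monitors: &[MonitorStub],
    channels_in_manager: &HashSet<[u8; 32]>,
) -> Vec<ForceCloseUpdate> {
    let mut updates = Vec::new();
    for monitor in monitors {
        if monitor.offchain_closed() {
            // We already applied a ChannelForceClosed update; nothing to regenerate,
            // and no `closed_channel_monitor_update_ids` entry is needed either.
            continue;
        }
        if !channels_in_manager.contains(&monitor.channel_id) {
            updates.push(ForceCloseUpdate { channel_id: monitor.channel_id });
        }
    }
    updates
}

fn main() {
    let monitors = vec![
        // Old, non-archived monitor that already saw ChannelForceClosed: skipped.
        MonitorStub { channel_id: [1; 32], lockdown_from_offchain: true },
        // Monitor that never saw ChannelForceClosed and has no live channel: updated.
        MonitorStub { channel_id: [2; 32], lockdown_from_offchain: false },
    ];
    let channels_in_manager = HashSet::new();
    let updates = startup_force_close_updates(&monitors, &channels_in_manager);
    assert_eq!(updates.len(), 1);
    assert_eq!(updates[0].channel_id, [2; 32]);
    println!("regenerated {} on-startup force-close update(s)", updates.len());
}

Because a monitor that already recorded a `ChannelForceClosed` step no longer produces an on-startup update, the tests below stop expecting an extra monitor update, which is why the `check_added_monitors(.., 1)` calls are removed.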

File tree

4 files changed: +10 -65 lines changed

lightning/src/chain/channelmonitor.rs (+6)

@@ -1711,6 +1711,12 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_cur_holder_commitment_number()
 	}
 
+	/// Gets whether we've been notified that this channel is closed by the `ChannelManager` (i.e.
+	/// via a [`ChannelMonitorUpdateStep::ChannelForceClosed`]).
+	pub(crate) fn offchain_closed(&self) -> bool {
+		self.inner.lock().unwrap().lockdown_from_offchain
+	}
+
 	/// Gets the `node_id` of the counterparty for this channel.
 	///
 	/// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`

lightning/src/ln/channelmanager.rs (+4 -57)

@@ -7253,8 +7253,6 @@ where
 		let prev_channel_id = hop_data.channel_id;
 		let prev_user_channel_id = hop_data.user_channel_id;
 		let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
-		#[cfg(debug_assertions)]
-		let claiming_chan_funding_outpoint = hop_data.outpoint;
 		self.claim_funds_from_hop(hop_data, payment_preimage, None,
 			|htlc_claim_value_msat, definitely_duplicate| {
 				let chan_to_release =
@@ -7279,61 +7277,6 @@ where
 					// monitor updates still in flight. In that case, we shouldn't
 					// immediately free, but instead let that monitor update complete
 					// in the background.
-					#[cfg(debug_assertions)] {
-						let background_events = self.pending_background_events.lock().unwrap();
-						// There should be a `BackgroundEvent` pending...
-						assert!(background_events.iter().any(|ev| {
-							match ev {
-								BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-									funding_txo, update, ..
-								} => {
-									if *funding_txo == claiming_chan_funding_outpoint {
-										// to apply a monitor update that blocked the claiming channel,
-										assert!(update.updates.iter().any(|upd|
-											if let ChannelMonitorUpdateStep::PaymentPreimage {
-												payment_preimage: update_preimage, ..
-											} = upd {
-												payment_preimage == *update_preimage
-											} else {
-												false
-											}
-										), "{:?}", update);
-										true
-									} else if *funding_txo == next_channel_outpoint {
-										// or the channel we'd unblock is already closed,
-										assert!(update.updates.iter().any(|upd|
-											if let ChannelMonitorUpdateStep::ChannelForceClosed { .. } = upd {
-												true
-											} else {
-												false
-											}
-										), "{:?}", update);
-										true
-									} else { false }
-								},
-								// or the channel we'd unblock is already closed (for an
-								// old channel),
-								BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
-									(funding_txo, _channel_id, monitor_update)
-								) => {
-									if *funding_txo == next_channel_outpoint {
-										assert_eq!(monitor_update.updates.len(), 1);
-										assert!(matches!(
-											monitor_update.updates[0],
-											ChannelMonitorUpdateStep::ChannelForceClosed { .. }
-										));
-										true
-									} else { false }
-								},
-								// or the monitor update has completed and will unblock
-								// immediately once we get going.
-								BackgroundEvent::MonitorUpdatesComplete {
-									channel_id, ..
-								} =>
-									*channel_id == prev_channel_id,
-							}
-						}), "{:?}", *background_events);
-					}
 					(None, None)
 				} else if definitely_duplicate {
 					if let Some(other_chan) = chan_to_release {
@@ -12636,6 +12579,10 @@ where
 		}
 
 		for (funding_txo, monitor) in args.channel_monitors.iter() {
+			if monitor.offchain_closed() {
+				// We already applied a ChannelForceClosed update.
+				continue;
+			}
 			if !funding_txo_set.contains(funding_txo) {
 				let logger = WithChannelMonitor::from(&args.logger, monitor, None);
 				let channel_id = monitor.channel_id();

lightning/src/ln/monitor_tests.rs (-4)

@@ -2302,9 +2302,6 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
 
 	// Connecting more blocks should result in the HTLC transactions being rebroadcast.
 	connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
-	if check_old_monitor_retries_after_upgrade {
-		check_added_monitors(&nodes[0], 1);
-	}
 	{
 		let txn = nodes[0].tx_broadcaster.txn_broadcast();
 		assert_eq!(txn.len(), 1);
@@ -3014,7 +3011,6 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
 	// If we saw the commitment before our `counterparty_payment_script` was fixed, we'll never
 	// get the spendable output event for the `to_remote` output, so we'll need to get it
 	// manually via `get_spendable_outputs`.
-	check_added_monitors(&nodes[1], 1);
 	let outputs = get_monitor!(nodes[1], chan_id).get_spendable_outputs(&commitment_tx, commitment_tx_conf_height);
 	assert_eq!(outputs.len(), 1);
 	let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(

lightning/src/ln/payment_tests.rs (-4)

@@ -993,7 +993,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
 	nodes[0].node.test_process_background_events();
-	check_added_monitors(&nodes[0], 1);
 
 	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
 	reconnect_args.send_channel_ready = (true, true);
@@ -1023,7 +1022,6 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
 	nodes[0].node.test_process_background_events();
-	check_added_monitors(&nodes[0], 1);
 
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
@@ -1162,7 +1160,6 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo
 	let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
 	nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
@@ -3522,7 +3519,6 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint:
 	reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert!(events.is_empty());
-	check_added_monitors(&nodes[0], 1);
 }
 
 #[test]
