Skip to content

Commit 48cb03c

Browse files
committed
Stop relying on ChannelMonitor persistence after manager read
When we discover we've only partially claimed an MPP HTLC during `ChannelManager` reading, we need to add the payment preimage to all other `ChannelMonitor`s that were a part of the payment. We previously did this with a direct call on the `ChannelMonitor`, requiring users write the full `ChannelMonitor` to disk to ensure that updated information made it. This adds quite a bit of delay during initial startup - fully resilvering each `ChannelMonitor` just to handle this one case is incredibly excessive. Over the past few commits we dropped the need to pass HTLCs directly to the `ChannelMonitor`s, using the background events to provide `ChannelMonitorUpdate`s instead. Thus, here we finally drop the requirement to resilver `ChannelMonitor`s on startup.
1 parent 60e30b8 commit 48cb03c

File tree

5 files changed

+21
-11
lines changed

5 files changed

+21
-11
lines changed

fuzz/src/chanmon_consistency.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -768,7 +768,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
768768
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state);
769769
}
770770
let mut monitor_refs = new_hash_map();
771-
for (outpoint, monitor) in monitors.iter_mut() {
771+
for (outpoint, monitor) in monitors.iter() {
772772
monitor_refs.insert(*outpoint, monitor);
773773
}
774774

lightning/src/ln/channelmanager.rs

+9-6
Original file line numberDiff line numberDiff line change
@@ -1615,7 +1615,7 @@ where
16151615
/// let mut channel_monitors = read_channel_monitors();
16161616
/// let args = ChannelManagerReadArgs::new(
16171617
/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1618-
/// router, message_router, logger, default_config, channel_monitors.iter_mut().collect(),
1618+
/// router, message_router, logger, default_config, channel_monitors.iter().collect(),
16191619
/// );
16201620
/// let (block_hash, channel_manager) =
16211621
/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
@@ -12226,9 +12226,12 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
1222612226
/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
1222712227
/// same way you would handle a [`chain::Filter`] call using
1222812228
/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
12229-
/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
12230-
/// 5) Disconnect/connect blocks on the [`ChannelManager`].
12231-
/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12229+
/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
12230+
/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
12231+
/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12232+
/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing
12233+
/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is
12234+
/// otherwise not required.
1223212235
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
1223312236
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
1223412237
/// the next step.
@@ -12311,7 +12314,7 @@ where
1231112314
/// this struct.
1231212315
///
1231312316
/// This is not exported to bindings users because we have no HashMap bindings
12314-
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12317+
pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1231512318
}
1231612319

1231712320
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
@@ -12334,7 +12337,7 @@ where
1233412337
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
1233512338
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
1233612339
default_config: UserConfig,
12337-
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12340+
mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1233812341
) -> Self {
1233912342
Self {
1234012343
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,

lightning/src/ln/functional_test_utils.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -686,7 +686,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
686686
// them to ensure we can write and reload our ChannelManager.
687687
{
688688
let mut channel_monitors = new_hash_map();
689-
for monitor in deserialized_monitors.iter_mut() {
689+
for monitor in deserialized_monitors.iter() {
690690
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
691691
}
692692

@@ -1128,7 +1128,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
11281128
let mut node_read = &chanman_encoded[..];
11291129
let (_, node_deserialized) = {
11301130
let mut channel_monitors = new_hash_map();
1131-
for monitor in monitors_read.iter_mut() {
1131+
for monitor in monitors_read.iter() {
11321132
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
11331133
}
11341134
<(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {

lightning/src/ln/reload_tests.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
426426
chain_monitor: nodes[0].chain_monitor,
427427
tx_broadcaster: nodes[0].tx_broadcaster,
428428
logger: &logger,
429-
channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
429+
channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
430430
}) { } else {
431431
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
432432
};
@@ -444,7 +444,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
444444
chain_monitor: nodes[0].chain_monitor,
445445
tx_broadcaster: nodes[0].tx_broadcaster,
446446
logger: &logger,
447-
channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
447+
channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
448448
}).unwrap();
449449
nodes_0_deserialized = nodes_0_deserialized_tmp;
450450
assert!(nodes_0_read.is_empty());

pending_changelog/3322-b.txt

+7
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
API Updates
2+
===========
3+
4+
As a part of adding robustness against several unlikely scenarios, redundant `PaymentClaimed`
5+
`Event`s will be generated more frequently on startup for payments received on LDK 0.1 and
6+
newer. A new `Event::PaymentClaimed::payment_id` field may be used to better differentiate
7+
between redundant payments.

0 commit comments

Comments
 (0)