Commit c55f548

Merge pull request #3414 from TheBlueMatt/2024-09-async-persist-claiming-from-closed-chan
Support async `ChannelMonitorUpdate` persist for claims against closed channels
2 parents c62cd15 + 79190ad commit c55f548
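
For orientation, the behavior exercised by this change is LDK's asynchronous `ChannelMonitorUpdate` persistence path: when the persister reports an update as `ChannelMonitorUpdateStatus::InProgress`, the claim's side effects (releasing `Event::PaymentClaimed` / `Event::PaymentForwarded`, and letting the preimage be removed from the other channel's `ChannelMonitor`) must stay blocked until that update is durable, even though the channel the update targets has already been closed. The sketch below is a deliberately simplified, self-contained model of that gating logic; all names in it (`MonitorUpdateGate`, `PendingEvent`, the `u64` channel ids) are hypothetical and are not LDK internals.

use std::collections::HashMap;

// Hypothetical stand-ins for the events LDK gates on monitor persistence.
#[derive(Debug, PartialEq)]
enum PendingEvent {
    PaymentClaimed,
    PaymentForwarded,
}

// Minimal model: count in-flight (async) monitor updates per channel and hold
// back events blocked on a channel until all of its updates are durable.
struct MonitorUpdateGate {
    in_flight: HashMap<u64, usize>,
    blocked: HashMap<u64, Vec<PendingEvent>>,
}

impl MonitorUpdateGate {
    fn new() -> Self {
        Self { in_flight: HashMap::new(), blocked: HashMap::new() }
    }

    // An update was handed to the persister, which reported it as in-progress.
    fn update_in_progress(&mut self, chan: u64) {
        *self.in_flight.entry(chan).or_insert(0) += 1;
    }

    // Queue an event that must not surface before `chan`'s updates are durable.
    fn block_event(&mut self, chan: u64, ev: PendingEvent) {
        self.blocked.entry(chan).or_default().push(ev);
    }

    // Async persistence finished; return any events that are now releasable.
    fn update_completed(&mut self, chan: u64) -> Vec<PendingEvent> {
        match self.in_flight.get_mut(&chan) {
            Some(cnt) if *cnt > 0 => {
                *cnt -= 1;
                if *cnt == 0 {
                    self.blocked.remove(&chan).unwrap_or_default()
                } else {
                    Vec::new()
                }
            }
            _ => Vec::new(),
        }
    }
}

fn main() {
    let mut gate = MonitorUpdateGate::new();
    // A claim whose preimage must be written to the (closed) channel 1 starts
    // an async persist; the downstream events are held back...
    gate.update_in_progress(1);
    gate.block_event(1, PendingEvent::PaymentForwarded);
    gate.block_event(1, PendingEvent::PaymentClaimed);
    // ...completing an unrelated channel's update releases nothing...
    assert!(gate.update_completed(2).is_empty());
    // ...and the events only surface once channel 1's update becomes durable.
    assert_eq!(
        gate.update_completed(1),
        vec![PendingEvent::PaymentForwarded, PendingEvent::PaymentClaimed]
    );
}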

File tree

3 files changed (+369 / -271 lines)

lightning/src/ln/chanmon_update_fail_tests.rs (+129 / -16)

@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
 		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-	} else {
-		// While we forwarded the payment a while ago, we don't want to process events too early or
-		// we'll run background tasks we wanted to test individually.
-		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
 	}
 
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Make sure the B<->C channel is still alive and well by sending a payment over it.
 	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 	reconnect_args.pending_responding_commitment_signed.1 = true;
-	if !close_chans_before_reload {
-		// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-		// will consider the forwarded payment complete and allow the B<->C
-		// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-		// be allowed, and needs fixing.
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-	}
+	// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is equivalent
+	// to the responding `commitment_signed` being a duplicate for node B, thus we need to set
+	// the `pending_responding_commitment_signed_dup` flag.
+	reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
 	reconnect_args.pending_raa.1 = true;
 
 	reconnect_nodes(reconnect_args);
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
 	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-	if !close_chans_before_reload {
-		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-		// channel will fly, removing the payment preimage from it.
-		check_added_monitors(&nodes[1], 1);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
+		}
 	}
+
+	// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+	// will fly, removing the payment preimage from it.
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }
@@ -3713,3 +3718,111 @@ fn test_partial_claim_mon_update_compl_actions() {
 	send_payment(&nodes[2], &[&nodes[3]], 100_000);
 	assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 }
+
+#[test]
+fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
+	// One of the last features for async persistence we implemented was the correct blocking of
+	// RAA(s) which remove a preimage from an outbound channel for a forwarded payment until the
+	// preimage write makes it durably to the closed inbound channel.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	// First open channels, route a payment, and force-close the first hop.
+	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+	let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[0], true);
+
+	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_commit_tx.len(), 1);
+
+	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[1], true);
+
+	// Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim
+	// the payment on C and give B the preimage for it.
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	check_added_monitors!(nodes[1], 1);
+	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+	// At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for
+	// channel A to hit disk. Until it does so, it shouldn't ever let the preimage disappear from
+	// channel B's `ChannelMonitor`.
+	assert!(get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+
+	// Once we complete the `ChannelMonitorUpdate` on channel A, and the `ChannelManager` processes
+	// background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate`
+	// will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release
+	// the `Event::PaymentForwarded`.
+	check_added_monitors!(nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+	assert!(!get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
+}
+
+#[test]
+fn test_claim_to_closed_channel_blocks_claimed_event() {
+	// One of the last features for async persistence we implemented was the correct blocking of
+	// event(s) until the preimage for a claimed HTLC is durably on disk in a ChannelMonitor for a
+	// closed channel.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// First open a channel, route a payment, and force-close the channel.
+	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[0], true);
+
+	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_commit_tx.len(), 1);
+
+	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[1], true);
+
+	// Now that B has a pending payment with the inbound HTLC on a closed channel, claim the
+	// payment, but don't let the `ChannelMonitorUpdate` complete. This should prevent the
+	// `Event::PaymentClaimed` from being generated.
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[1].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become
+	// available.
+	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
+}
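
Note that both new tests drive the asynchronous flow through test-harness helpers: `persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress)` makes the next persistence call report as in-progress, and `complete_sole_pending_chan_update` / `force_channel_monitor_updated` later mark it durable. In a real deployment the same transition is driven by the `Persist` implementation returning `ChannelMonitorUpdateStatus::InProgress` and later notifying the `ChainMonitor` that the write completed (via `ChainMonitor::channel_monitor_updated`; the exact signature of that call varies across LDK releases).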
