@@ -3619,14 +3619,23 @@ impl TrustedChannelFeatures {
36193619struct ClaimCompletionActionParams {
36203620 definitely_duplicate: bool,
36213621 inbound_htlc_value_msat: Option<u64>,
3622+ inbound_edge_closed: bool,
36223623}
36233624
36243625impl ClaimCompletionActionParams {
36253626 fn new_claim(inbound_htlc_value_msat: u64) -> Self {
3626- Self { definitely_duplicate: false, inbound_htlc_value_msat: Some(inbound_htlc_value_msat) }
3627+ Self {
3628+ definitely_duplicate: false,
3629+ inbound_htlc_value_msat: Some(inbound_htlc_value_msat),
3630+ inbound_edge_closed: false,
3631+ }
36273632 }
36283633 fn duplicate_claim() -> Self {
3629- Self { definitely_duplicate: true, inbound_htlc_value_msat: None }
3634+ Self {
3635+ definitely_duplicate: true,
3636+ inbound_htlc_value_msat: None,
3637+ inbound_edge_closed: false,
3638+ }
36303639 }
36313640}
36323641
@@ -9635,16 +9644,56 @@ impl<
96359644 monitor_event_id
96369645 .map(|event_id| MonitorEventSource { event_id, channel_id: next_channel_id }),
96379646 |claim_completion_action_params| {
9638- let ClaimCompletionActionParams { definitely_duplicate, inbound_htlc_value_msat } =
9639- claim_completion_action_params;
9647+ let ClaimCompletionActionParams {
9648+ definitely_duplicate,
9649+ inbound_htlc_value_msat,
9650+ inbound_edge_closed,
9651+ } = claim_completion_action_params;
96409652 let chan_to_release = EventUnblockedChannel {
96419653 counterparty_node_id: next_channel_counterparty_node_id,
96429654 funding_txo: next_channel_outpoint,
96439655 channel_id: next_channel_id,
96449656 blocking_action: completed_blocker,
96459657 };
96469658
9647- if definitely_duplicate && startup_replay {
9659+ if self.persistent_monitor_events {
9660+ let monitor_event_source = monitor_event_id.map(|event_id| {
9661+ MonitorEventSource { event_id, channel_id: next_channel_id }
9662+ });
9663+ // If persistent_monitor_events is enabled, the MonitorEvent for this HTLC claim
9664+ // will keep being re-provided to us until we explicitly ack it.
9665+ // * If the inbound edge is closed, then we can ack it when we know the preimage is
9666+ // durably persisted there + the user has processed a `PaymentForwarded` event
9667+ // * If the inbound edge is open, then we'll ack the monitor event when the HTLC has been
9668+ // irrevocably removed via revoke_and_ack. This prevents forgetting to claim the HTLC
9669+ // backwards if we lose the off-chain HTLC from the holding cell after a restart.
9670+ if definitely_duplicate {
9671+ if inbound_edge_closed {
9672+ if let Some(id) = monitor_event_source {
9673+ self.chain_monitor.ack_monitor_event(id);
9674+ }
9675+ }
9676+ (None, None)
9677+ } else if let Some(event) =
9678+ make_payment_forwarded_event(inbound_htlc_value_msat)
9679+ {
9680+ let preimage_update_action =
9681+ MonitorUpdateCompletionAction::EmitForwardEvent {
9682+ event,
9683+ post_event_ackable_monitor_event: inbound_edge_closed
9684+ .then_some(monitor_event_source)
9685+ .flatten(),
9686+ };
9687+ (Some(preimage_update_action), None)
9688+ } else if inbound_edge_closed {
9689+ let preimage_update_action = monitor_event_source.map(|src| {
9690+ MonitorUpdateCompletionAction::AckMonitorEvents { event_ids: vec![src] }
9691+ });
9692+ (preimage_update_action, None)
9693+ } else {
9694+ (None, None)
9695+ }
9696+ } else if definitely_duplicate && startup_replay {
96489697 // On startup we may get redundant claims which are related to
96499698 // monitor updates still in flight. In that case, we shouldn't
96509699 // immediately free, but instead let that monitor update complete
@@ -9977,6 +10026,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
997710026 let (action_opt, raa_blocker_opt) = completion_action(ClaimCompletionActionParams {
997810027 definitely_duplicate: false,
997910028 inbound_htlc_value_msat: None,
10029+ inbound_edge_closed: true,
998010030 });
998110031
998210032 if let Some(raa_blocker) = raa_blocker_opt {
@@ -12691,23 +12741,28 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1269112741 chan.update_fulfill_htlc(&msg),
1269212742 chan_entry
1269312743 );
12694- let prev_hops = match &res.0 {
12695- HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop],
12696- HTLCSource::TrampolineForward { previous_hop_data, .. } => {
12697- previous_hop_data.iter().collect()
12698- },
12699- _ => vec![],
12700- };
12701- let logger = WithChannelContext::from(&self.logger, &chan.context, None);
12702- for prev_hop in prev_hops {
12703- log_trace!(logger,
12704- "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor",
12705- );
12706- peer_state
12707- .actions_blocking_raa_monitor_updates
12708- .entry(msg.channel_id)
12709- .or_insert_with(Vec::new)
12710- .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(prev_hop));
12744+ if !self.persistent_monitor_events {
12745+ let prev_hops = match &res.0 {
12746+ HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop],
12747+ HTLCSource::TrampolineForward { previous_hop_data, .. } => {
12748+ previous_hop_data.iter().collect()
12749+ },
12750+ _ => vec![],
12751+ };
12752+ let logger =
12753+ WithChannelContext::from(&self.logger, &chan.context, None);
12754+ for prev_hop in prev_hops {
12755+ log_trace!(logger,
12756+ "Holding the next revoke_and_ack until the preimage is durably persisted in the inbound edge's ChannelMonitor",
12757+ );
12758+ peer_state
12759+ .actions_blocking_raa_monitor_updates
12760+ .entry(msg.channel_id)
12761+ .or_insert_with(Vec::new)
12762+ .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(
12763+ prev_hop,
12764+ ));
12765+ }
1271112766 }
1271212767
1271312768 // Note that we do not need to push an `actions_blocking_raa_monitor_updates`
@@ -13709,29 +13764,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1370913764 .channel_by_id
1371013765 .contains_key(&channel_id)
1371113766 });
13712- let we_are_sender =
13713- matches!(htlc_update.source, HTLCSource::OutboundRoute { .. });
13714- if from_onchain | we_are_sender {
13715- // Claim the funds from the previous hop, if there is one. Because this is in response to a
13716- // chain event, no attribution data is available.
13717- self.claim_funds_internal(
13718- htlc_update.source,
13719- preimage,
13720- htlc_update.htlc_value_msat,
13721- None,
13722- from_onchain,
13723- counterparty_node_id,
13724- funding_outpoint,
13725- channel_id,
13726- htlc_update.user_channel_id,
13727- None,
13728- None,
13729- Some(event_id),
13730- );
13731- }
13732- if !we_are_sender {
13733- self.chain_monitor.ack_monitor_event(monitor_event_source);
13734- }
13767+ // Claim the funds from the previous hop, if there is one. Because this is in response to a
13768+ // chain event, no attribution data is available.
13769+ self.claim_funds_internal(
13770+ htlc_update.source,
13771+ preimage,
13772+ htlc_update.htlc_value_msat,
13773+ None,
13774+ from_onchain,
13775+ counterparty_node_id,
13776+ funding_outpoint,
13777+ channel_id,
13778+ htlc_update.user_channel_id,
13779+ None,
13780+ None,
13781+ Some(event_id),
13782+ );
1373513783 } else {
1373613784 log_trace!(logger, "Failing HTLC from our monitor");
1373713785 let failure_reason = LocalHTLCFailureReason::OnChainTimeout;
@@ -20658,6 +20706,9 @@ impl<
2065820706 downstream_user_channel_id,
2065920707 ) in pending_claims_to_replay
2066020708 {
20709+ if channel_manager.persistent_monitor_events {
20710+ continue;
20711+ }
2066120712 // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
2066220713 // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
2066320714 // channel is closed we just assume that it probably came from an on-chain claim.