
Commit 4d6c262

Merge pull request #1119 from TheBlueMatt/2021-10-less-aggressive-htlc-timeouts
Be less aggressive in outbound HTLC CLTV timeout checks
2 parents 4bb81ff + 5e998cc commit 4d6c262

4 files changed: +18 -13 lines

lightning/src/chain/channelmonitor.rs

+0 -5

@@ -225,18 +225,13 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 /// fail this HTLC,
 /// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
 /// condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-/// that HTLC expires within this many blocks, we will simply fail the HTLC instead.
 ///
 /// (1) is all about protecting us - we need enough time to update the channel state before we hit
 /// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
 ///
 /// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
 /// in a race condition between the user connecting a block (which would fail it) and the user
 /// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
 pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

 // TODO(devrandom) replace this with HolderCommitmentTransaction

lightning/src/ln/channel.rs

+5 -2

@@ -32,7 +32,7 @@ use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputIn
 use ln::chan_utils;
 use chain::BestBlock;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::{Sign, KeysInterface};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};

@@ -4197,7 +4197,10 @@ impl<Signer: Sign> Channel<Signer> {
     pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
     -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
         let mut timed_out_htlcs = Vec::new();
-        let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
+        // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+        // forward an HTLC when our counterparty should almost certainly just fail it for expiring
+        // ~now.
+        let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
         self.holding_cell_htlc_updates.retain(|htlc_update| {
             match htlc_update {
                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
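The retain body elided at the end of the hunk above fails back any holding-cell AddHTLC whose cltv_expiry falls at or below the new, smaller limit. A simplified, self-contained sketch of that filtering step, using a hypothetical PendingAdd struct in place of LDK's HTLCUpdateAwaitingACK::AddHTLC:

    // Hypothetical stand-in for the fields of HTLCUpdateAwaitingACK::AddHTLC used here.
    struct PendingAdd { cltv_expiry: u32 }

    /// Splits holding-cell adds into (still forwardable, failed back), mirroring the
    /// `cltv_expiry <= unforwarded_htlc_cltv_limit` comparison in the diff.
    fn prune_holding_cell(height: u32, adds: Vec<PendingAdd>) -> (Vec<PendingAdd>, Vec<PendingAdd>) {
        const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value at the time of this commit
        let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
        adds.into_iter().partition(|add| add.cltv_expiry > unforwarded_htlc_cltv_limit)
    }

    fn main() {
        let adds = vec![PendingAdd { cltv_expiry: 106 }, PendingAdd { cltv_expiry: 200 }];
        let (kept, failed_back) = prune_holding_cell(100, adds);
        assert_eq!(kept.len(), 1);        // expiry 200 is still worth forwarding
        assert_eq!(failed_back.len(), 1); // expiry 106 <= 100 + 6, so it is failed back
    }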

lightning/src/ln/channelmanager.rs

+12 -5

@@ -1959,17 +1959,24 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
         break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
     }
     let cur_height = self.best_block.read().unwrap().height() + 1;
-    // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
-    // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+    // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+    // but we want to be robust wrt to counterparty packet sanitization (see
+    // HTLC_FAIL_BACK_BUFFER rationale).
     if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
         break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
     }
     if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
         break Some(("CLTV expiry is too far in the future", 21, None));
     }
-    // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-    // But, to be safe against policy reception, we use a longer delay.
-    if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+    // If the HTLC expires ~now, don't bother trying to forward it to our
+    // counterparty. They should fail it anyway, but we don't want to bother with
+    // the round-trips or risk them deciding they definitely want the HTLC and
+    // force-closing to ensure they get it if we're offline.
+    // We previously had a much more aggressive check here which tried to ensure
+    // our counterparty receives an HTLC which has *our* risk threshold met on it,
+    // but there is no need to do that, and since we're a bit conservative with our
+    // risk threshold it just results in failing to forward payments.
+    if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
         break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
     }
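The net effect is an asymmetry between the two checks: an HTLC we receive must still clear our full HTLC_FAIL_BACK_BUFFER (we need room to safely claim or fail it ourselves), while an HTLC we forward only needs to outlive LATENCY_GRACE_PERIOD_BLOCKS, since the next hop applies its own buffer when it receives it. A sketch of the two comparisons as standalone functions, with the constant values assumed as above:

    const CLTV_CLAIM_BUFFER: u32 = 18;          // assumed value at the time of this commit
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value at the time of this commit
    const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

    /// Inbound check (unchanged): reject with expiry_too_soon if the HTLC expires inside our window.
    fn accept_inbound(cur_height: u32, cltv_expiry: u32) -> bool {
        cltv_expiry > cur_height + HTLC_FAIL_BACK_BUFFER
    }

    /// Outbound check (relaxed by this commit): only refuse to forward if the HTLC expires ~now.
    fn forward_outbound(cur_height: u32, outgoing_cltv_value: u32) -> bool {
        outgoing_cltv_value > cur_height + LATENCY_GRACE_PERIOD_BLOCKS
    }

    fn main() {
        let cur_height = 700_000;
        // An HTLC 10 blocks from expiry: too close for us to accept inbound, but
        // still fine to hand to the next hop under the relaxed outbound check.
        assert!(!accept_inbound(cur_height, cur_height + 10));
        assert!(forward_outbound(cur_height, cur_height + 10));
    }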

lightning/src/ln/functional_tests.rs

+1 -1

@@ -3994,7 +3994,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
     }
     check_added_monitors!(nodes[1], 0);

-    connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
+    connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
     assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
     assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
     connect_blocks(&nodes[1], 1);
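The test change follows directly from the new threshold: the HTLC sitting in nodes[1]'s holding cell is now failed back CLTV_CLAIM_BUFFER blocks later than before, so the test advances to within LATENCY_GRACE_PERIOD_BLOCKS of the HTLC's expiry, confirms nothing has fired, and only expects the fail-back once one more block crosses the <= boundary. A small sketch of that boundary arithmetic, with a hypothetical expiry height and the assumed constant value:

    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 6; // assumed value at the time of this commit

    fn main() {
        let cltv_expiry = 1_000u32; // hypothetical HTLC expiry height
        let last_quiet_height = cltv_expiry - LATENCY_GRACE_PERIOD_BLOCKS - 1;
        // One block below the boundary: the HTLC stays in the holding cell.
        assert!(cltv_expiry > last_quiet_height + LATENCY_GRACE_PERIOD_BLOCKS);
        // Connecting one more block satisfies `cltv_expiry <= height + LATENCY_GRACE_PERIOD_BLOCKS`,
        // so the HTLC is failed back.
        assert!(cltv_expiry <= (last_quiet_height + 1) + LATENCY_GRACE_PERIOD_BLOCKS);
    }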
