diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 040ac6b3f2f..0f59f0e1bb9 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -223,16 +223,20 @@ impl_writeable_tlv_based!(HTLCUpdate, {
 /// transaction.
 pub(crate) const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;

-/// When we go to force-close a channel because an HTLC is expiring, we should ensure that the
-/// HTLC(s) expiring are not considered pinnable, allowing us to aggregate them with other HTLC(s)
-/// expiring at the same time.
-const _: () = assert!(CLTV_CLAIM_BUFFER > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
+/// When we go to force-close a channel because an HTLC is expiring, we should ensure that, by the
+/// time we've gotten the commitment transaction confirmed, the expiring HTLC(s) are not considered
+/// pinnable, allowing us to aggregate them with other HTLC(s) expiring at the same time.
+const _: () = assert!(MAX_BLOCKS_FOR_CONF > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
+
+/// The upper bound on how many blocks we think it can take for us to get a transaction confirmed.
+pub(crate) const MAX_BLOCKS_FOR_CONF: u32 = 18;

 /// If an HTLC expires within this many blocks, force-close the channel to broadcast the
 /// HTLC-Success transaction.
-/// In other words, this is an upper bound on how many blocks we think it can take us to get a
-/// transaction confirmed (and we use it in a few more, equivalent, places).
-pub(crate) const CLTV_CLAIM_BUFFER: u32 = 18;
+///
+/// This is two times [`MAX_BLOCKS_FOR_CONF`] as we need to first get the commitment transaction
+/// confirmed, then get an HTLC transaction confirmed.
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = MAX_BLOCKS_FOR_CONF * 2;

 /// Number of blocks by which point we expect our counterparty to have seen new blocks on the
 /// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
 /// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
@@ -4511,18 +4515,6 @@ impl ChannelMonitorImpl {
 			// chain when our counterparty is waiting for expiration to off-chain fail an HTLC
 			// we give ourselves a few blocks of headroom after expiration before going
 			// on-chain for an expired HTLC.
-			// Note that, to avoid a potential attack whereby a node delays claiming an HTLC
-			// from us until we've reached the point where we go on-chain with the
-			// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
-			// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-			// aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
-			// inbound_cltv == height + CLTV_CLAIM_BUFFER
-			// outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-			// LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
-			// CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
-			// LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
-			// The final, above, condition is checked for statically in channelmanager
-			// with CHECK_CLTV_EXPIRY_SANITY_2.
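// An illustrative, self-contained sketch (not part of the patch above): the compile-time
// `const _: () = assert!(...)` pattern used in the hunk, with the constant values it introduces.
// The names are redefined locally so the snippet builds on its own; in the real code they live in
// channelmonitor.rs.
const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;
const MAX_BLOCKS_FOR_CONF: u32 = 18;
const CLTV_CLAIM_BUFFER: u32 = MAX_BLOCKS_FOR_CONF * 2;

// A failing condition aborts compilation, so the relationship between "blocks needed for one
// confirmation" and "the pinnable window" can never silently regress.
const _: () = assert!(MAX_BLOCKS_FOR_CONF > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
const _: () = assert!(CLTV_CLAIM_BUFFER == 2 * MAX_BLOCKS_FOR_CONF);

fn main() {
	// Two confirmations are needed on a force-close: first the commitment transaction, then the
	// HTLC claim, hence the doubled buffer (18 + 18 = 36 blocks).
	println!("CLTV_CLAIM_BUFFER = {} blocks", CLTV_CLAIM_BUFFER);
}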
 			let htlc_outbound = $holder_tx == htlc.offered;
 			if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
 				(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs
index c8916978afa..b888b9ceb5c 100644
--- a/lightning/src/ln/async_payments_tests.rs
+++ b/lightning/src/ln/async_payments_tests.rs
@@ -540,6 +540,13 @@ fn async_receive_mpp() {
 	create_announced_chan_between_nodes(&nodes, 0, 2);
 	create_unannounced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0);
 	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+
+	// Ensure all nodes start at the same height.
+	connect_blocks(&nodes[0], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
+
 	let (offer, static_invoice) = create_static_invoice(&nodes[1], &nodes[3], None, &secp_ctx);

 	// In other tests we hardcode the sender's random bytes so we can predict the keysend preimage to
@@ -622,6 +629,12 @@ fn amount_doesnt_match_invreq() {
 	create_unannounced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0);
 	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);

+	// Ensure all nodes start at the same height.
+	connect_blocks(&nodes[0], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
+
 	let (offer, static_invoice) = create_static_invoice(&nodes[1], &nodes[3], None, &secp_ctx);

 	// Set the random bytes so we can predict the payment preimage and hash.
@@ -815,9 +828,15 @@ fn invalid_async_receive_with_retry(
 	let node_chanmgrs =
 		create_node_chanmgrs(3, &node_cfgs, &[None, Some(allow_priv_chan_fwds_cfg), None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
 	create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);

+	// Ensure all nodes start at the same height.
+	connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+
 	let blinded_paths_to_always_online_node = nodes[1]
 		.message_router
 		.create_blinded_paths(
diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs
index 9fc722f8205..5c52ecc2f4d 100644
--- a/lightning/src/ln/blinded_payment_tests.rs
+++ b/lightning/src/ln/blinded_payment_tests.rs
@@ -204,6 +204,12 @@ fn mpp_to_one_hop_blinded_path() {
 	let chan_upd_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents;
 	create_announced_chan_between_nodes(&nodes, 2, 3).0.contents;

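// An illustrative, self-contained sketch (not part of the patch above): the connect_blocks calls
// added in these test hunks all compute "common target height minus this node's current height",
// where the target is `channels_opened * CHAN_CONFIRM_DEPTH + 1`. The helper name and the
// CHAN_CONFIRM_DEPTH value of 10 are assumptions based on the test utilities, not code from the
// patch itself.
const CHAN_CONFIRM_DEPTH: u32 = 10;

fn blocks_to_connect(channels_opened: u32, current_height: u32) -> u32 {
	let target = channels_opened * CHAN_CONFIRM_DEPTH + 1;
	target.checked_sub(current_height).expect("node is already past the common target height")
}

fn main() {
	// Mirrors `connect_blocks(&nodes[0], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1)`
	// for a node that has so far only seen two of the four channels confirm.
	let current_height = 2 * CHAN_CONFIRM_DEPTH + 1;
	assert_eq!(blocks_to_connect(4, current_height), 2 * CHAN_CONFIRM_DEPTH);
	println!("connect {} blocks", blocks_to_connect(4, current_height));
}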
+	// Ensure all nodes start at the same height.
+	connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
+
 	let amt_msat = 15_000_000;
 	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
 	let payee_tlvs = UnauthenticatedReceiveTlvs {
@@ -267,6 +273,14 @@ fn mpp_to_three_hop_blinded_paths() {
 	let chan_upd_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5).0.contents;
 	let chan_upd_4_5 = create_announced_chan_between_nodes(&nodes, 4, 5).0.contents;

+	// Start every node on the same block height to make reasoning about timeouts easier
+	connect_blocks(&nodes[0], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
+	connect_blocks(&nodes[4], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
+	connect_blocks(&nodes[5], 6*CHAN_CONFIRM_DEPTH + 1 - nodes[5].best_block_info().1);
+
 	let amt_msat = 15_000_000;
 	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[5], Some(amt_msat), None);
 	let route_params = {
@@ -1070,6 +1084,12 @@ fn blinded_path_retries() {
 	let chan_1_3 = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0);
 	let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);

+	// Ensure all nodes start at the same height.
+	connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
+
 	let amt_msat = 5000;
 	let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
 	let route_params = {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 00a536539ea..99374c7bb54 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -40,7 +40,7 @@ use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentPath,
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -2824,7 +2824,7 @@ pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
 pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;

 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly eight hours of blocks at six blocks/hour.
 ///
 /// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
 ///
@@ -2833,7 +2833,7 @@ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 // i.e. the node we forwarded the payment on to should always have enough room to reliably time out
 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*8;
 // This should be long enough to allow a payment path drawn across multiple routing hops with substantial
 // `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
 // in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
@@ -2850,19 +2850,34 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
 // a payment was being routed, so we add an extra block to be safe.
 pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;

-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
-// ie that if the next-hop peer fails the HTLC within
-// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
-// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
-// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
-// LATENCY_GRACE_PERIOD_BLOCKS.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get everything on chain and locked
+// in with enough time left to fail the corresponding HTLC back to our inbound edge before they
+// force-close on us.
+// In other words, if the next-hop peer fails the HTLC LATENCY_GRACE_PERIOD_BLOCKS after our
+// CLTV_CLAIM_BUFFER (because that's how many blocks we allow them after expiry), we'll still have
+// 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY left to get two transactions on chain and the second
+// fully locked in before the peer force-closes on us (LATENCY_GRACE_PERIOD_BLOCKS before the
+// expiry, i.e. assuming the peer force-closes right at the expiry and we're behind by
+// LATENCY_GRACE_PERIOD_BLOCKS).
+const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS + 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
+);
+
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get the HTLC preimage back to our
+// counterparty if the outbound edge gives us the preimage only one block before we'd force-close
+// the channel.
+// i.e. they provide the preimage LATENCY_GRACE_PERIOD_BLOCKS - 1 after the HTLC expires, then we
+// pass the preimage back, which takes LATENCY_GRACE_PERIOD_BLOCKS to complete, and we want to make
+// sure this all happens at least N blocks before the inbound HTLC expires (where N is the
+// counterparty's CLTV_CLAIM_BUFFER or equivalent).
+const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;

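// An illustrative, self-contained sketch (not part of the patch above): the block-count budget
// behind the new sanity checks, written out numerically. LATENCY_GRACE_PERIOD_BLOCKS = 3 and
// ANTI_REORG_DELAY = 6 are assumptions taken from the current channelmonitor.rs values; the other
// numbers come from this patch (MIN_CLTV_EXPIRY_DELTA = 6*8 = 48, MAX_BLOCKS_FOR_CONF = 18,
// CLTV_CLAIM_BUFFER = 36).
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const ANTI_REORG_DELAY: u32 = 6;
const MAX_BLOCKS_FOR_CONF: u32 = 18;
const CLTV_CLAIM_BUFFER: u32 = MAX_BLOCKS_FOR_CONF * 2;
const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 8;
const ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;

fn main() {
	// On-chain path: grace period on both edges, two confirmations, then reorg safety.
	// 2*3 + 2*18 + 6 = 48, which is exactly MIN_CLTV_EXPIRY_DELTA, so the check holds with no
	// slack to spare.
	let on_chain_budget = 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY;
	assert!(MIN_CLTV_EXPIRY_DELTA as u32 >= on_chain_budget);
	println!("on-chain budget: {} of {} blocks", on_chain_budget, MIN_CLTV_EXPIRY_DELTA);

	// Off-chain path: a late preimage (grace period minus one), our own grace period to pass it
	// back, plus the counterparty's assumed claim buffer: 2*3 - 1 + 36 = 41 <= 48.
	let off_chain_budget = 2 * LATENCY_GRACE_PERIOD_BLOCKS - 1 + ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER;
	assert!(ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
	assert!(MIN_CLTV_EXPIRY_DELTA as u32 >= off_chain_budget);
	println!("off-chain budget: {} of {} blocks", off_chain_budget, MIN_CLTV_EXPIRY_DELTA);
}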
-// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const _CHECK_COUNTERPARTY_REALISTIC: () =
+	assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
+
+const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
+);

 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
@@ -15979,7 +15994,7 @@ mod tests {
 		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive(msgs::InboundOnionReceivePayload {
 			sender_intended_htlc_amt_msat: 100,
-			cltv_expiry_height: 22,
+			cltv_expiry_height: TEST_FINAL_CLTV,
 			payment_metadata: None,
 			keysend_preimage: None,
 			payment_data: Some(msgs::FinalOnionHopData {
@@ -15987,7 +16002,7 @@
 				total_msat: 100,
 			}),
 			custom_tlvs: Vec::new(),
-		}), [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
+		}), [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);

 		// Should not return an error as this condition:
 		// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 1d1884b8d58..03ae9872820 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3500,7 +3500,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
 	assert_eq!(node_txn.len(), 1);
 	check_spends!(node_txn[0], commitment_tx[0]);
-	assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+	assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
 }

 #[xtest(feature = "_externalize_tests")]
@@ -5362,115 +5362,119 @@ pub fn test_onchain_to_onchain_claim() {
 #[xtest(feature = "_externalize_tests")]
 pub fn test_duplicate_payment_hash_one_failure_one_success() {
 	// Topology : A --> B --> C --> D
-	// We route 2 payments with same hash between B and C, one will be timeout, the other successfully claim
-	// Note that because C will refuse to generate two payment secrets for the same payment hash,
-	// we forward one of the payments onwards to D.
-	let chanmon_cfgs = create_chanmon_cfgs(4);
-	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	//                            \-> E
+	// We route 2 payments with the same hash between B and C, one of which we will time out on
+	// chain and the other of which we will successfully claim.
+	let chanmon_cfgs = create_chanmon_cfgs(5);
+	let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
 	// When this test was written, the default base fee floated based on the HTLC count.
 	// It is now fixed, so we simply set the fee to the expected value here.
 	let mut config = test_default_channel_config();
 	config.channel_config.forwarding_fee_base_msat = 196;
-	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
-		&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
-	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs,
+		&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
+	let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs);
+
+	// Create the required channels and route one HTLC from A to D and another from A to E.
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 	create_announced_chan_between_nodes(&nodes, 2, 3);
+	create_announced_chan_between_nodes(&nodes, 2, 4);

 	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
-	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
-	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
-	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
-	connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
-
-	let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
-
-	let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
-	// We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
-	// script push size limit so that the below script length checks match
-	// ACCEPTED_HTLC_SCRIPT_WEIGHT.
-	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
-		.with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
-	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
-	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
-
+	connect_blocks(&nodes[0], node_max_height * 2 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], node_max_height * 2 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], node_max_height * 2 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[3], node_max_height * 2 - nodes[3].best_block_info().1);
+	connect_blocks(&nodes[4], node_max_height * 2 - nodes[4].best_block_info().1);
+
+	let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000);
+
+	let payment_secret = nodes[4].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
+	let payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV)
+		.with_bolt11_features(nodes[4].node.bolt11_invoice_features()).unwrap();
+	let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[4], payment_params, 800_000);
+	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[4]]], 800_000, duplicate_payment_hash, payment_secret);
+
+	// Now mine C's commitment transaction on node B and mine enough blocks to get the HTLC timeout
+	// transaction (which we'll split in two so that we can resolve the HTLCs differently).
 	let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
 	assert_eq!(commitment_txn[0].input.len(), 1);
+	assert_eq!(commitment_txn[0].output.len(), 3);
 	check_spends!(commitment_txn[0], chan_2.3);

 	mine_transaction(&nodes[1], &commitment_txn[0]);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
-	connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
-	let htlc_timeout_tx;
-	{ // Extract one of the two HTLC-Timeout transaction
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		// ChannelMonitor: timeout tx * 2-or-3
-		assert!(node_txn.len() == 2 || node_txn.len() == 3);
+	// Confirm blocks until both HTLCs expire and get a transaction which times out one HTLC.
+	connect_blocks(&nodes[1], TEST_FINAL_CLTV + config.channel_config.cltv_expiry_delta as u32);

-		check_spends!(node_txn[0], commitment_txn[0]);
-		assert_eq!(node_txn[0].input.len(), 1);
-		assert_eq!(node_txn[0].output.len(), 1);
-
-		if node_txn.len() > 2 {
-			check_spends!(node_txn[1], commitment_txn[0]);
-			assert_eq!(node_txn[1].input.len(), 1);
-			assert_eq!(node_txn[1].output.len(), 1);
-			assert_eq!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
-
-			check_spends!(node_txn[2], commitment_txn[0]);
-			assert_eq!(node_txn[2].input.len(), 1);
-			assert_eq!(node_txn[2].output.len(), 1);
-			assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
-		} else {
-			check_spends!(node_txn[1], commitment_txn[0]);
-			assert_eq!(node_txn[1].input.len(), 1);
-			assert_eq!(node_txn[1].output.len(), 1);
-			assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
-		}
+	let htlc_timeout_tx = {
+		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);

-		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-		assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-		// Assign htlc_timeout_tx to the forwarded HTLC (with value ~800 sats). The received HTLC
-		// (with value 900 sats) will be claimed in the below `claim_funds` call.
-		if node_txn.len() > 2 {
-			assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-			htlc_timeout_tx = if node_txn[2].output[0].value.to_sat() < 900 { node_txn[2].clone() } else { node_txn[0].clone() };
-		} else {
-			htlc_timeout_tx = if node_txn[0].output[0].value.to_sat() < 900 { node_txn[1].clone() } else { node_txn[0].clone() }
+		let mut tx = node_txn.pop().unwrap();
+		check_spends!(tx, commitment_txn[0]);
+		assert_eq!(tx.input.len(), 2);
+		assert_eq!(tx.output.len(), 1);
+		// Note that the witness script lengths are one longer than our constant as the CLTV value
+		// went to two bytes rather than one.
+		assert_eq!(tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
+		assert_eq!(tx.input[1].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
+
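// An illustrative, self-contained sketch (not part of the patch above): why the witness-script
// length checks in this test gained a "+ 1". The received-HTLC witness script pushes the CLTV
// expiry as a minimally encoded script number, so once the tests run at expiries that no longer
// fit in a single script-number byte (values above 127) the push takes two data bytes and the
// whole script grows by one byte. The encoder below is a local reimplementation for illustration,
// not an API from rust-bitcoin.
fn scriptnum_len(mut n: u64) -> usize {
	// Minimal CScriptNum encoding for a positive value: little-endian bytes, plus an extra byte
	// if the top bit of the most significant byte is set (to keep the number positive).
	if n == 0 { return 0; }
	let mut len = 0;
	let mut last = 0u8;
	while n > 0 {
		last = (n & 0xff) as u8;
		n >>= 8;
		len += 1;
	}
	if last & 0x80 != 0 { len += 1; }
	len
}

fn main() {
	// At the old, artificially reduced test expiries the push fit in one byte...
	assert_eq!(scriptnum_len(100), 1);
	// ...while the heights used after this patch need two, hence ACCEPTED_HTLC_SCRIPT_WEIGHT + 1.
	assert_eq!(scriptnum_len(500), 2);
	println!("script push for expiry 500 is {} bytes", scriptnum_len(500));
}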
+		// Split the HTLC claim transaction into two, one for each HTLC.
+		if commitment_txn[0].output[tx.input[1].previous_output.vout as usize].value.to_sat() < 850 {
+			tx.input.remove(1);
 		}
-	}
+		if commitment_txn[0].output[tx.input[0].previous_output.vout as usize].value.to_sat() < 850 {
+			tx.input.remove(0);
+		}
+		assert_eq!(tx.input.len(), 1);
+		tx
+	};

-	nodes[2].node.claim_funds(our_payment_preimage);
-	expect_payment_claimed!(nodes[2], duplicate_payment_hash, 900_000);
+	// Now give node E the payment preimage and pass it back to C.
+	nodes[4].node.claim_funds(our_payment_preimage);
+	expect_payment_claimed!(nodes[4], duplicate_payment_hash, 800_000);
+	check_added_monitors!(nodes[4], 1);
+	let updates = get_htlc_update_msgs!(nodes[4], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fulfill_htlc(nodes[4].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	let _cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false);
+	check_added_monitors!(nodes[2], 1);
+	commitment_signed_dance!(nodes[2], nodes[4], &updates.commitment_signed, false);

+	// Mine the commitment transaction on node C and get the HTLC success transactions it will
+	// generate (note that the ChannelMonitor doesn't differentiate between HTLCs once it has the
+	// preimage).
 	mine_transaction(&nodes[2], &commitment_txn[0]);
-	check_added_monitors!(nodes[2], 2);
+	check_added_monitors!(nodes[2], 1);
 	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-	let events = nodes[2].node.get_and_clear_pending_msg_events();
-	match events[0] {
-		MessageSendEvent::UpdateHTLCs { .. } => {},
-		_ => panic!("Unexpected event"),
-	}
-	match events[2] {
-		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-		_ => panic!("Unexepected event"),
-	}
+	check_closed_broadcast(&nodes[2], 1, true);
+
 	let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
 	check_spends!(htlc_success_txn[0], commitment_txn[0]);
 	check_spends!(htlc_success_txn[1], commitment_txn[0]);
 	assert_eq!(htlc_success_txn[0].input.len(), 1);
-	assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+	// Note that the witness script lengths are one longer than our constant as the CLTV value went
+	// to two bytes rather than one.
+	assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
 	assert_eq!(htlc_success_txn[1].input.len(), 1);
-	assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+	assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
 	assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
-	assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
+	let htlc_success_tx_to_confirm =
+		if htlc_success_txn[0].input[0].previous_output == htlc_timeout_tx.input[0].previous_output {
+			&htlc_success_txn[1]
+		} else {
+			&htlc_success_txn[0]
+		};
+	assert_ne!(htlc_success_tx_to_confirm.input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
+
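// An illustrative, self-contained sketch (not part of the patch above): the
// `htlc_success_tx_to_confirm` selection simply avoids handing node B a transaction that
// conflicts with the timeout transaction it already mined. With both claims spending distinct
// HTLC outputs of the same commitment transaction, the non-conflicting one is the one whose input
// points at a different previous output. The types below are local stand-ins for
// bitcoin::OutPoint, kept minimal so the snippet builds on its own.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct OutPoint { vout: u32 }
struct Tx { spends: OutPoint }

fn pick_non_conflicting<'a>(candidates: &'a [Tx; 2], already_mined: &Tx) -> &'a Tx {
	if candidates[0].spends == already_mined.spends { &candidates[1] } else { &candidates[0] }
}

fn main() {
	let timeout = Tx { spends: OutPoint { vout: 0 } };
	let successes = [Tx { spends: OutPoint { vout: 0 } }, Tx { spends: OutPoint { vout: 1 } }];
	let chosen = pick_non_conflicting(&successes, &timeout);
	assert_ne!(chosen.spends, timeout.spends);
	println!("confirm the HTLC-success tx spending vout {}", chosen.spends.vout);
}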
+	// Mine the HTLC timeout transaction on node B.
 	mine_transaction(&nodes[1], &htlc_timeout_tx);
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
@@ -5484,14 +5488,13 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() {
 	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	{
-		commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
-	}
+	commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
 	expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true);

-	// Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
-	mine_transaction(&nodes[1], &htlc_success_txn[1]);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196), true, true);
+	// Finally, give node B the HTLC success transaction and ensure it extracts the preimage to
+	// provide to node A.
+	mine_transaction(&nodes[1], htlc_success_tx_to_confirm);
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true);
 	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fail_htlcs.is_empty());
@@ -7828,7 +7831,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() {
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);

 	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-	let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
+	let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
 	let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
 	send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
@@ -9384,25 +9387,19 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	// Step (6):
 	// Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
 	// broadcasted commitment transaction.
-	{
-		let script_weight = match broadcast_alice {
-			true => OFFERED_HTLC_SCRIPT_WEIGHT,
-			false => ACCEPTED_HTLC_SCRIPT_WEIGHT
-		};
-		// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
-		// Bob force-closed and broadcasts the commitment transaction along with a
-		// HTLC-output-claiming transaction.
-		let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-		if broadcast_alice {
-			assert_eq!(bob_txn.len(), 1);
-			check_spends!(bob_txn[0], txn_to_broadcast[0]);
-			assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
-		} else {
-			assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
-			let htlc_tx = bob_txn.pop().unwrap();
-			check_spends!(htlc_tx, txn_to_broadcast[0]);
-			assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
-		}
+	// If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
+	// Bob force-closed and broadcasts the commitment transaction along with a
+	// HTLC-output-claiming transaction.
+	let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	if broadcast_alice {
+		assert_eq!(bob_txn.len(), 1);
+		check_spends!(bob_txn[0], txn_to_broadcast[0]);
+		assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+	} else {
+		assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+		let htlc_tx = bob_txn.pop().unwrap();
+		check_spends!(htlc_tx, txn_to_broadcast[0]);
+		assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1);
 	}
 }
diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs
index 3c67b7c581f..a88f6067b16 100644
--- a/lightning/src/ln/invoice_utils.rs
+++ b/lightning/src/ln/invoice_utils.rs
@@ -711,6 +711,7 @@ mod test {
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::network::Network;
 	use crate::sign::PhantomKeysManager;
+	use crate::chain::channelmonitor::HTLC_FAIL_BACK_BUFFER;
 	use crate::types::payment::{PaymentHash, PaymentPreimage};
 	use crate::ln::channelmanager::{Bolt11InvoiceParameters, PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields, Retry};
 	use crate::ln::functional_test_utils::*;
@@ -843,7 +844,7 @@ mod test {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

-		let custom_min_final_cltv_expiry_delta = Some(21);
+		let custom_min_final_cltv_expiry_delta = Some(HTLC_FAIL_BACK_BUFFER as u16);
 		let description = Bolt11InvoiceDescription::Direct(Description::empty());
 		let invoice_params = Bolt11InvoiceParameters {
 			amount_msats: Some(10_000),
diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs
index 3c424c9a393..95a1fbaaa10 100644
--- a/lightning/src/ln/max_payment_path_len_tests.rs
+++ b/lightning/src/ln/max_payment_path_len_tests.rs
@@ -156,6 +156,11 @@ fn one_hop_blinded_path_with_custom_tlv() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;

+	// Start with all nodes at the same height
+	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+
 	// Construct the route parameters for sending to nodes[2]'s 1-hop blinded path.
 	let amt_msat = 100_000;
 	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index bccd994bbff..cd2ef491846 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -493,7 +493,8 @@ mod tests {
 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 	use crate::ln::types::ChannelId;
 	use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
-	use crate::ln::channelmanager::RecipientOnionFields;
+	use crate::ln::channelmanager::{RecipientOnionFields, MIN_CLTV_EXPIRY_DELTA};
+	use crate::ln::functional_test_utils::TEST_FINAL_CLTV;
 	use crate::types::features::{ChannelFeatures, NodeFeatures};
 	use crate::ln::msgs;
 	use crate::ln::onion_utils::create_payment_onion;
@@ -624,7 +625,7 @@ mod tests {
 			RouteHop {
 				pubkey: hop_pk,
 				fee_msat: hop_fee,
-				cltv_expiry_delta: 42,
+				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
 				short_channel_id: 1,
 				node_features: NodeFeatures::empty(),
 				channel_features: ChannelFeatures::empty(),
@@ -633,7 +634,7 @@
 			RouteHop {
 				pubkey: recipient_pk,
 				fee_msat: recipient_amount,
-				cltv_expiry_delta: 42,
+				cltv_expiry_delta: TEST_FINAL_CLTV,
 				short_channel_id: 2,
 				node_features: NodeFeatures::empty(),
 				channel_features: ChannelFeatures::empty(),
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index a63e4264805..c482d97ea8b 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -246,7 +246,7 @@ fn test_routed_scid_alias() {
 		htlc_maximum_msat: None,
 		htlc_minimum_msat: None,
 	}])];
-	let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42)
+	let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap()
 		.with_route_hints(hop_hints).unwrap();
 	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000);
@@ -412,7 +412,7 @@ fn test_inbound_scid_privacy() {
 		htlc_maximum_msat: None,
 		htlc_minimum_msat: None,
 	}])];
-	let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42)
+	let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap()
 		.with_route_hints(hop_hints.clone()).unwrap();
 	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000);
@@ -428,7 +428,7 @@
 	// what channel we're talking about.
 	hop_hints[0].0[0].short_channel_id = last_hop[0].short_channel_id.unwrap();

-	let payment_params_2 = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42)
+	let payment_params_2 = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap()
 		.with_route_hints(hop_hints).unwrap();
 	let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_2, 100_000);