Skip to content

Commit 8789c80

Browse files
TheBlueMatt and wvanlint
authored and committed
Move PackageTemplate merging decisions entirely into package.rs
Currently our package merging logic is strewn about between `package.rs` (which decides various flags based on the package type) and `onchaintx.rs` (which does the actual merging based on the derived flags as well as its own logic), making the logic hard to follow. Instead, here we consolidate the package merging logic entirely into `package.rs` with a new `PackageTemplate::can_merge_with` method that decides if merging can happen. We also simplify the merge pass in `update_claims_view_from_requests` to try to maximally merge by testing each pair of `PackageTemplate`s we're given to see if they can be merged. This is overly complicated (and inefficient) for today's merge logic, but over the coming commits we'll expand when we can merge and not having to think about the merge pass' behavior makes that much simpler (and O(N^2) for <1000 elements done only once when a commitment transaction confirms is fine).
1 parent b3fc536 commit 8789c80

File tree

3 files changed

+56
-35
lines changed

3 files changed

+56
-35
lines changed

lightning/src/chain/onchaintx.rs

Lines changed: 45 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ use crate::types::payment::PaymentPreimage;
3131
use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
3232
use crate::chain::ClaimId;
3333
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
34-
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
34+
use crate::chain::channelmonitor::ANTI_REORG_DELAY;
3535
use crate::chain::package::{PackageSolvingData, PackageTemplate};
3636
use crate::chain::transaction::MaybeSignedTransaction;
3737
use crate::util::logger::Logger;
@@ -726,7 +726,7 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
726726
/// does not need to equal the current blockchain tip height, which should be provided via
727727
/// `cur_height`, however it must never be higher than `cur_height`.
728728
pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
729-
&mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
729+
&mut self, mut requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
730730
broadcaster: &B, conf_target: ConfirmationTarget,
731731
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
732732
) where
@@ -737,49 +737,63 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
737737
log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
738738
}
739739

740-
let mut preprocessed_requests = Vec::with_capacity(requests.len());
741-
let mut aggregated_request = None;
742-
743-
// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
744-
// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
745-
for req in requests {
746-
// Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
747-
if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
748-
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
740+
// First drop any duplicate claims.
741+
requests.retain(|req| {
742+
debug_assert_eq!(
743+
req.outpoints().len(),
744+
1,
745+
"Claims passed to `update_claims_view_from_requests` should not be aggregated"
746+
);
747+
let mut all_outpoints_claiming = true;
748+
for outpoint in req.outpoints() {
749+
if self.claimable_outpoints.get(outpoint).is_none() {
750+
all_outpoints_claiming = false;
751+
}
752+
}
753+
if all_outpoints_claiming {
754+
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request",
755+
req.outpoints()[0].txid, req.outpoints()[0].vout);
756+
false
749757
} else {
750758
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
751759
.find(|locked_package| locked_package.outpoints() == req.outpoints());
752760
if let Some(package) = timelocked_equivalent_package {
753761
log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
754762
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
755-
continue;
763+
false
764+
} else {
765+
true
756766
}
767+
}
768+
});
757769

758-
let package_locktime = req.package_locktime(cur_height);
759-
if package_locktime > cur_height + 1 {
760-
log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
761-
for outpoint in req.outpoints() {
762-
log_info!(logger, " Outpoint {}", outpoint);
763-
}
764-
self.locktimed_packages.entry(package_locktime).or_default().push(req);
765-
continue;
770+
// Then try to maximally aggregate `requests`.
771+
for i in (1..requests.len()).rev() {
772+
for j in 0..i {
773+
if requests[i].can_merge_with(&requests[j], cur_height) {
774+
let merge = requests.remove(i);
775+
requests[j].merge_package(merge);
776+
break;
766777
}
778+
}
779+
}
767780

768-
log_trace!(logger, "Test if outpoint which our counterparty can spend at {} can be aggregated based on aggregation limit {}", req.counterparty_spendable_height(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
769-
if req.counterparty_spendable_height() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
770-
preprocessed_requests.push(req);
771-
} else if aggregated_request.is_none() {
772-
aggregated_request = Some(req);
773-
} else {
774-
aggregated_request.as_mut().unwrap().merge_package(req);
781+
// Finally, split requests into timelocked ones and immediately-spendable ones.
782+
let mut preprocessed_requests = Vec::with_capacity(requests.len());
783+
for req in requests {
784+
let package_locktime = req.package_locktime(cur_height);
785+
if package_locktime > cur_height + 1 {
786+
log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
787+
for outpoint in req.outpoints() {
788+
log_info!(logger, " Outpoint {}", outpoint);
775789
}
790+
self.locktimed_packages.entry(package_locktime).or_default().push(req);
791+
} else {
792+
preprocessed_requests.push(req);
776793
}
777794
}
778-
if let Some(req) = aggregated_request {
779-
preprocessed_requests.push(req);
780-
}
781795

782-
// Claim everything up to and including `cur_height`
796+
// Claim everything up to and including `cur_height`.
783797
let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
784798
if !self.locktimed_packages.is_empty() {
785799
log_debug!(logger,

lightning/src/chain/package.rs

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ use crate::types::features::ChannelTypeFeatures;
3030
use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
3131
use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA;
3232
use crate::ln::msgs::DecodeError;
33+
use crate::chain::channelmonitor::CLTV_SHARED_CLAIM_BUFFER;
3334
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW};
3435
use crate::chain::transaction::MaybeSignedTransaction;
3536
use crate::sign::ecdsa::EcdsaChannelSigner;
@@ -759,6 +760,12 @@ pub struct PackageTemplate {
759760
}
760761

761762
impl PackageTemplate {
763+
pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool {
764+
self.aggregable() && other.aggregable() &&
765+
self.package_locktime(cur_height) == other.package_locktime(cur_height) &&
766+
self.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER &&
767+
other.counterparty_spendable_height() > cur_height + CLTV_SHARED_CLAIM_BUFFER
768+
}
762769
pub(crate) fn is_malleable(&self) -> bool {
763770
self.malleability == PackageMalleability::Malleable
764771
}

lightning/src/ln/functional_tests.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7666,7 +7666,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
76667666
// Verify claim tx are spending revoked HTLC txn
76677667

76687668
// node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
7669-
// Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
7669+
// Note that node_txn[1] and node_txn[2] are bogus - they double spend the revoked_htlc_txn
76707670
// which are included in the same block (they are broadcasted because we scan the
76717671
// transactions linearly and generate claims as we go, they likely should be removed in the
76727672
// future).
@@ -7683,8 +7683,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
76837683
assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
76847684
assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
76857685

7686-
assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7687-
assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
7686+
assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
7687+
assert_eq!(node_txn[2].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
76887688

76897689
// node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one
76907690
// output, checked above).
@@ -7696,7 +7696,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
76967696
// Store both feerates for later comparison
76977697
let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[3].output[0].value;
76987698
feerate_1 = fee_1 * 1000 / node_txn[3].weight().to_wu();
7699-
penalty_txn = vec![node_txn[2].clone()];
7699+
penalty_txn = vec![node_txn[0].clone()];
77007700
node_txn.clear();
77017701
}
77027702

0 commit comments

Comments (0)