
Commit ddeaab6

Merge pull request #3340 from wvanlint/claim_batching
Batch on-chain claims more aggressively per channel
2 parents a688f1c + 0fe90c6 commit ddeaab6

File tree

5 files changed: +855 −752 lines changed


lightning/src/chain/channelmonitor.rs

+10 −3
@@ -216,9 +216,16 @@ impl_writeable_tlv_based!(HTLCUpdate, {
 	(4, payment_preimage, option),
 });
 
-/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
-/// instead claiming it in its own individual transaction.
-pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
+/// If an output goes from claimable only by us to claimable by us or our counterparty within this
+/// many blocks, we consider it pinnable for the purposes of aggregating claims in a single
+/// transaction.
+pub(crate) const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;
+
+/// When we go to force-close a channel because an HTLC is expiring, we should ensure that the
+/// HTLC(s) expiring are not considered pinnable, allowing us to aggregate them with other HTLC(s)
+/// expiring at the same time.
+const _: () = assert!(CLTV_CLAIM_BUFFER > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
+
 /// If an HTLC expires within this many blocks, force-close the channel to broadcast the
 /// HTLC-Success transaction.
 /// In other words, this is an upper bound on how many blocks we think it can take us to get a
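The relationship between the two constants is enforced at compile time: a `const _: () = assert!(...)` item is evaluated during constant evaluation, so the crate fails to build if `CLTV_CLAIM_BUFFER` ever drops to or below `COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE`. A minimal standalone sketch of the pattern follows; the numeric values are placeholders for illustration, not the ones defined in `channelmonitor.rs`.

// Placeholder values for illustration; the real constants live in channelmonitor.rs.
const CLTV_CLAIM_BUFFER: u32 = 18;
const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;

// Evaluated at compile time: if the inequality ever becomes false, constant
// evaluation panics and the build fails, so the invariant cannot regress silently.
const _: () = assert!(CLTV_CLAIM_BUFFER > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);

fn main() {
	// Nothing happens at runtime; the check already ran during compilation.
	println!(
		"claim buffer = {}, pinnable window = {}",
		CLTV_CLAIM_BUFFER, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE
	);
}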

lightning/src/chain/onchaintx.rs

+49 −31
@@ -31,7 +31,7 @@ use crate::types::payment::PaymentPreimage;
 use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
 use crate::chain::ClaimId;
 use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
+use crate::chain::channelmonitor::ANTI_REORG_DELAY;
 use crate::chain::package::{PackageSolvingData, PackageTemplate};
 use crate::chain::transaction::MaybeSignedTransaction;
 use crate::util::logger::Logger;
@@ -726,7 +726,7 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 	/// does not need to equal the current blockchain tip height, which should be provided via
 	/// `cur_height`, however it must never be higher than `cur_height`.
 	pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
-		&mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
+		&mut self, mut requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
 		broadcaster: &B, conf_target: ConfirmationTarget,
 		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
 	) where
@@ -737,49 +737,67 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 			log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
 		}
 
-		let mut preprocessed_requests = Vec::with_capacity(requests.len());
-		let mut aggregated_request = None;
-
-		// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
-		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
-		for req in requests {
-			// Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
-			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
-				log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
+		// First drop any duplicate claims.
+		requests.retain(|req| {
+			debug_assert_eq!(
+				req.outpoints().len(),
+				1,
+				"Claims passed to `update_claims_view_from_requests` should not be aggregated"
+			);
+			let mut all_outpoints_claiming = true;
+			for outpoint in req.outpoints() {
+				if self.claimable_outpoints.get(outpoint).is_none() {
+					all_outpoints_claiming = false;
+				}
+			}
+			if all_outpoints_claiming {
+				log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request",
+					req.outpoints()[0].txid, req.outpoints()[0].vout);
+				false
 			} else {
 				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
 					.find(|locked_package| locked_package.outpoints() == req.outpoints());
 				if let Some(package) = timelocked_equivalent_package {
 					log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
 						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
-					continue;
+					false
+				} else {
+					true
 				}
+			}
+		});
 
-				let package_locktime = req.package_locktime(cur_height);
-				if package_locktime > cur_height + 1 {
-					log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
-					for outpoint in req.outpoints() {
-						log_info!(logger, " Outpoint {}", outpoint);
+		// Then try to maximally aggregate `requests`.
+		for i in (1..requests.len()).rev() {
+			for j in 0..i {
+				if requests[i].can_merge_with(&requests[j], cur_height) {
+					let merge = requests.remove(i);
+					if let Err(rejected) = requests[j].merge_package(merge, cur_height) {
+						debug_assert!(false, "Merging package should not be rejected after verifying can_merge_with.");
+						requests.insert(i, rejected);
+					} else {
+						break;
 					}
-					self.locktimed_packages.entry(package_locktime).or_default().push(req);
-					continue;
 				}
+			}
+		}
 
-				log_trace!(logger, "Test if outpoint which our counterparty can spend at {} can be aggregated based on aggregation limit {}", req.counterparty_spendable_height(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
-				if req.counterparty_spendable_height() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
-					preprocessed_requests.push(req);
-				} else if aggregated_request.is_none() {
-					aggregated_request = Some(req);
-				} else {
-					aggregated_request.as_mut().unwrap().merge_package(req);
+		// Finally, split requests into timelocked ones and immediately-spendable ones.
+		let mut preprocessed_requests = Vec::with_capacity(requests.len());
+		for req in requests {
+			let package_locktime = req.package_locktime(cur_height);
+			if package_locktime > cur_height + 1 {
+				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
+				for outpoint in req.outpoints() {
+					log_info!(logger, " Outpoint {}", outpoint);
 				}
+				self.locktimed_packages.entry(package_locktime).or_default().push(req);
+			} else {
+				preprocessed_requests.push(req);
 			}
 		}
-		if let Some(req) = aggregated_request {
-			preprocessed_requests.push(req);
-		}
 
-		// Claim everything up to and including `cur_height`
+		// Claim everything up to and including `cur_height`.
 		let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
 		if !self.locktimed_packages.is_empty() {
 			log_debug!(logger,
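The aggregation pass above merges claim requests pairwise: the outer index walks backwards so that `remove(i)` never invalidates the smaller index `j`, and a merge that is unexpectedly rejected hands the package back so it can be reinserted unchanged. Below is a self-contained sketch of that index pattern on a toy type; the `Claim` struct and its `can_merge_with`/`merge_package` rules are illustrative stand-ins, not LDK's `PackageTemplate`.

// Simplified stand-in for a claim request: a set of outputs plus a flag that
// controls whether it may be aggregated with others.
#[derive(Debug)]
struct Claim {
	outpoints: Vec<u32>,
	aggregable: bool,
}

impl Claim {
	fn can_merge_with(&self, other: &Claim) -> bool {
		self.aggregable && other.aggregable
	}

	// Mirrors the fallible merge in the diff: on rejection the original value is
	// handed back so the caller can reinsert it unchanged.
	fn merge_package(&mut self, other: Claim) -> Result<(), Claim> {
		if !self.can_merge_with(&other) {
			return Err(other);
		}
		self.outpoints.extend(other.outpoints);
		Ok(())
	}
}

fn aggregate(mut requests: Vec<Claim>) -> Vec<Claim> {
	// Walk `i` from the back so `remove(i)` never disturbs the smaller index `j`.
	for i in (1..requests.len()).rev() {
		for j in 0..i {
			if requests[i].can_merge_with(&requests[j]) {
				let merge = requests.remove(i);
				if let Err(rejected) = requests[j].merge_package(merge) {
					// Should be unreachable after `can_merge_with`; restore the element.
					requests.insert(i, rejected);
				} else {
					break;
				}
			}
		}
	}
	requests
}

fn main() {
	let requests = vec![
		Claim { outpoints: vec![0], aggregable: true },
		Claim { outpoints: vec![1], aggregable: false },
		Claim { outpoints: vec![2], aggregable: true },
	];
	// The two aggregable claims collapse into one; the non-aggregable one stays alone.
	println!("{:?}", aggregate(requests));
}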
@@ -1088,7 +1106,7 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 				OnchainEvent::ContentiousOutpoint { package } => {
 					if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
 						if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
-							request.merge_package(package);
+							assert!(request.merge_package(package, height).is_ok());
 							// Using a HashMap guarantee us than if we have multiple outpoints getting
 							// resurrected only one bump claim tx is going to be broadcast
 							bump_candidates.insert(pending_claim.clone(), request.clone());
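Earlier in `update_claims_view_from_requests`, duplicate claims are now dropped up front with `Vec::retain` against the set of outpoints that are already being claimed. A rough standalone sketch of that filtering pattern with toy types; the `Request` struct and `(u64, u32)` outpoint tuples are illustrative only, not LDK types.

use std::collections::HashMap;

// Toy single-outpoint claim request: (txid stand-in, vout).
#[derive(Debug)]
struct Request {
	outpoint: (u64, u32),
}

fn drop_duplicate_claims(
	requests: &mut Vec<Request>,
	claimable_outpoints: &HashMap<(u64, u32), ()>,
) {
	// Keep only requests whose outpoint is not already tracked by a pending claim,
	// mirroring the `requests.retain(...)` pass added in the diff.
	requests.retain(|req| {
		if claimable_outpoints.contains_key(&req.outpoint) {
			println!(
				"Ignoring second claim for outpoint {}:{}, already registered its claiming request",
				req.outpoint.0, req.outpoint.1
			);
			false
		} else {
			true
		}
	});
}

fn main() {
	let mut already_claiming = HashMap::new();
	already_claiming.insert((1u64, 0u32), ());

	let mut requests = vec![
		Request { outpoint: (1, 0) }, // duplicate of a tracked claim: dropped
		Request { outpoint: (2, 1) }, // new outpoint: kept
	];
	drop_duplicate_claims(&mut requests, &already_claiming);
	println!("{:?}", requests);
}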

0 commit comments
