Batch on-chain claims more aggressively per channel #3340

Merged · 5 commits · Dec 11, 2024
Changes from all commits

13 changes: 10 additions & 3 deletions lightning/src/chain/channelmonitor.rs
@@ -216,9 +216,16 @@ impl_writeable_tlv_based!(HTLCUpdate, {
(4, payment_preimage, option),
});

/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
/// instead claiming it in its own individual transaction.
pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
/// If an output goes from claimable only by us to claimable by us or our counterparty within this
/// many blocks, we consider it pinnable for the purposes of aggregating claims in a single
/// transaction.
pub(crate) const COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE: u32 = 12;

/// When we go to force-close a channel because an HTLC is expiring, we should ensure that the
/// HTLC(s) expiring are not considered pinnable, allowing us to aggregate them with other HTLC(s)
/// expiring at the same time.
const _: () = assert!(CLTV_CLAIM_BUFFER > COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE);
Collaborator:
TIL you can do this in our MSRV...lol we've got some code cleanup to do...
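
As an aside for readers unfamiliar with the trick: below is a minimal, self-contained sketch of a compile-time assertion, using placeholder constants rather than LDK's real values. If I recall correctly, `assert!` inside a `const` item has worked on stable Rust since const panics were stabilized (around 1.57), so a violated invariant fails the build instead of surfacing at runtime.

```rust
// Minimal sketch with placeholder constants, not LDK's actual values.
const CLAIM_BUFFER: u32 = 18;
const PINNABLE_BUFFER: u32 = 12;

// Evaluated at compile time: if the invariant is violated, the build fails
// with an error pointing at this line instead of panicking at runtime.
const _: () = assert!(CLAIM_BUFFER > PINNABLE_BUFFER);

fn main() {
    println!("{} > {} holds at compile time", CLAIM_BUFFER, PINNABLE_BUFFER);
}
```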


/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
/// HTLC-Success transaction.
/// In other words, this is an upper bound on how many blocks we think it can take us to get a
80 changes: 49 additions & 31 deletions lightning/src/chain/onchaintx.rs
@@ -31,7 +31,7 @@ use crate::types::payment::PaymentPreimage;
use crate::ln::chan_utils::{self, ChannelTransactionParameters, HTLCOutputInCommitment, HolderCommitmentTransaction};
use crate::chain::ClaimId;
use crate::chain::chaininterface::{FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
use crate::chain::channelmonitor::ANTI_REORG_DELAY;
use crate::chain::package::{PackageSolvingData, PackageTemplate};
use crate::chain::transaction::MaybeSignedTransaction;
use crate::util::logger::Logger;
@@ -726,7 +726,7 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
/// does not need to equal the current blockchain tip height, which should be provided via
/// `cur_height`, however it must never be higher than `cur_height`.
pub(super) fn update_claims_view_from_requests<B: Deref, F: Deref, L: Logger>(
&mut self, requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
&mut self, mut requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32,
broadcaster: &B, conf_target: ConfirmationTarget,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) where
@@ -737,49 +737,67 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
}

let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;

// Try to aggregate outputs if their timelock expiration isn't imminent (package timelock
// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
for req in requests {
// Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
// First drop any duplicate claims.
requests.retain(|req| {
debug_assert_eq!(
req.outpoints().len(),
1,
"Claims passed to `update_claims_view_from_requests` should not be aggregated"
);
let mut all_outpoints_claiming = true;
for outpoint in req.outpoints() {
if self.claimable_outpoints.get(outpoint).is_none() {
all_outpoints_claiming = false;
}
}
if all_outpoints_claiming {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request",
req.outpoints()[0].txid, req.outpoints()[0].vout);
false
} else {
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height));
continue;
false
} else {
true
}
}
});

let package_locktime = req.package_locktime(cur_height);
if package_locktime > cur_height + 1 {
log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
for outpoint in req.outpoints() {
log_info!(logger, " Outpoint {}", outpoint);
// Then try to maximally aggregate `requests`.
for i in (1..requests.len()).rev() {
for j in 0..i {
if requests[i].can_merge_with(&requests[j], cur_height) {
let merge = requests.remove(i);
if let Err(rejected) = requests[j].merge_package(merge, cur_height) {
debug_assert!(false, "Merging package should not be rejected after verifying can_merge_with.");
requests.insert(i, rejected);
Collaborator:

Hmm, removing then inserting at every step is kinda annoying cause it generally requires a vec shift...I'm not entirely convinced by this commit. If we want to reduce the risk of accidental panic introductions with code changes maybe we rename merge_package to make it clearer that it assumes can_merge_with?

Contributor Author:

Yeah, the re-inserts were only introduced as an alternative to panicking on the Err(_) here. However, those errors should never occur as we call can_merge_with beforehand. Added a debug_assert!(false, _). I can change the re-inserts to a panic! as well to maintain the previous behavior.

The main goal was to push panic!s up in the stack, while avoiding preconditions on merge_package. Since the Result of merge_package is determined by can_merge_with, the latter can be used beforehand to optimize any calls.

I can remove the commit as well though.

Collaborator:

This is fine. Note that the fixup commit will need to go on the Make PackageTemplate::merge_package fallible commit, not at the end where it currently sits.

Contributor Author:

Moved the fixup commit into the right place.
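
To make the pattern under discussion concrete, here is a hedged, stand-alone sketch using simplified stand-in types (not the real `PackageTemplate` API): `can_merge_with` is checked first, `merge_package` remains fallible, and an `Err` after a passing check is flagged as a logic bug with `debug_assert!` while the rejected package is re-inserted so nothing is silently dropped in release builds.

```rust
// Hedged sketch of the check-then-merge pattern, with simplified stand-in
// types; the real PackageTemplate carries much more state.
#[derive(Debug)]
struct Package {
    outpoints: Vec<u32>,
    aggregable: bool,
}

impl Package {
    fn can_merge_with(&self, other: &Package) -> bool {
        self.aggregable && other.aggregable
    }

    // Fallible merge: hands the other package back on rejection so the caller
    // decides whether to keep it around as its own claim.
    fn merge_package(&mut self, other: Package) -> Result<(), Package> {
        if !self.can_merge_with(&other) {
            return Err(other);
        }
        self.outpoints.extend(other.outpoints);
        Ok(())
    }
}

fn aggregate(mut requests: Vec<Package>) -> Vec<Package> {
    // Walk from the back so removals don't disturb the indices left to visit.
    for i in (1..requests.len()).rev() {
        for j in 0..i {
            if requests[i].can_merge_with(&requests[j]) {
                let merge = requests.remove(i);
                if let Err(rejected) = requests[j].merge_package(merge) {
                    // Should be unreachable: can_merge_with was checked first.
                    debug_assert!(false, "merge rejected after can_merge_with passed");
                    requests.insert(i, rejected);
                }
                break;
            }
        }
    }
    requests
}

fn main() {
    let reqs = vec![
        Package { outpoints: vec![0], aggregable: true },
        Package { outpoints: vec![1], aggregable: true },
        Package { outpoints: vec![2], aggregable: false },
    ];
    println!("{:?}", aggregate(reqs));
}
```

Under this shape the re-insert on the `Err` branch should be dead code in practice, but it keeps release builds lossless if the invariant ever breaks.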

} else {
break;
}
self.locktimed_packages.entry(package_locktime).or_default().push(req);
continue;
}
}
}

log_trace!(logger, "Test if outpoint which our counterparty can spend at {} can be aggregated based on aggregation limit {}", req.counterparty_spendable_height(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
if req.counterparty_spendable_height() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
preprocessed_requests.push(req);
} else if aggregated_request.is_none() {
aggregated_request = Some(req);
} else {
aggregated_request.as_mut().unwrap().merge_package(req);
// Finally, split requests into timelocked ones and immediately-spendable ones.
let mut preprocessed_requests = Vec::with_capacity(requests.len());
for req in requests {
let package_locktime = req.package_locktime(cur_height);
if package_locktime > cur_height + 1 {
log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", package_locktime, cur_height);
for outpoint in req.outpoints() {
Contributor:

Pre-existing, but it looks like PackageTemplate::outpoints might be able to return an iterator and save some allocations. Probably not for this PR though.

Contributor Author (@wvanlint, Dec 5, 2024):

I took a quick stab at it, but it did not turn out to be entirely straightforward. Is it okay to postpone to another PR?
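
As a rough illustration of the allocation-saving idea (simplified, hypothetical types, not the real `PackageTemplate` internals), an accessor could return `impl Iterator<Item = &OutPoint>` so callers that only iterate never build an intermediate `Vec`:

```rust
// Rough sketch with simplified, hypothetical types; the real PackageTemplate
// stores its inputs differently.
struct OutPoint {
    txid: [u8; 32],
    vout: u32,
}

struct Package {
    inputs: Vec<(OutPoint, u64)>, // (outpoint, value) pairs tracked by the package
}

impl Package {
    // Allocates a fresh Vec on every call, as the current accessor does.
    fn outpoints_vec(&self) -> Vec<&OutPoint> {
        self.inputs.iter().map(|(op, _)| op).collect()
    }

    // Borrows the same data lazily; callers that only iterate skip the allocation.
    fn outpoints_iter(&self) -> impl Iterator<Item = &OutPoint> + '_ {
        self.inputs.iter().map(|(op, _)| op)
    }
}

fn main() {
    let pkg = Package { inputs: vec![(OutPoint { txid: [0; 32], vout: 1 }, 10_000)] };
    for op in pkg.outpoints_iter() {
        println!("spending output {} of some tx", op.vout);
    }
    assert_eq!(pkg.outpoints_iter().next().unwrap().txid, [0; 32]);
    assert_eq!(pkg.outpoints_vec().len(), 1);
}
```

The wrinkle, presumably, is that existing call sites index into the returned `Vec` (e.g. `req.outpoints()[0]`), which a plain iterator cannot support without collecting anyway.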

log_info!(logger, " Outpoint {}", outpoint);
}
self.locktimed_packages.entry(package_locktime).or_default().push(req);
} else {
preprocessed_requests.push(req);
}
}
if let Some(req) = aggregated_request {
preprocessed_requests.push(req);
}

// Claim everything up to and including `cur_height`
// Claim everything up to and including `cur_height`.
let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
if !self.locktimed_packages.is_empty() {
log_debug!(logger,
@@ -1088,7 +1106,7 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
OnchainEvent::ContentiousOutpoint { package } => {
if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
request.merge_package(package);
assert!(request.merge_package(package, height).is_ok());
// Using a HashMap guarantee us than if we have multiple outpoints getting
// resurrected only one bump claim tx is going to be broadcast
bump_candidates.insert(pending_claim.clone(), request.clone());