Do not force-close channels when we cannot communicate with peers #1429

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
40 changes: 11 additions & 29 deletions lightning/src/ln/channelmanager.rs
@@ -5699,39 +5699,21 @@ impl&lt;Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref&gt;
 		let channel_state = &mut *channel_state_lock;
 		let pending_msg_events = &mut channel_state.pending_msg_events;
 		let short_to_id = &mut channel_state.short_to_id;
-		if no_connection_possible {
-			log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
-			channel_state.by_id.retain(|_, chan| {
-				if chan.get_counterparty_node_id() == *counterparty_node_id {
+		log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+			log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+		channel_state.by_id.retain(|_, chan| {
+			if chan.get_counterparty_node_id() == *counterparty_node_id {
+				chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+				if chan.is_shutdown() {
 					update_maps_on_chan_removal!(self, short_to_id, chan);
-					failed_channels.push(chan.force_shutdown(true));
-					if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-							msg: update
-						});
-					}
 					self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-					false
+					return false;
 				} else {
-					true
-				}
-			});
-		} else {
-			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
-			channel_state.by_id.retain(|_, chan| {
-				if chan.get_counterparty_node_id() == *counterparty_node_id {
-					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-					if chan.is_shutdown() {
-						update_maps_on_chan_removal!(self, short_to_id, chan);
-						self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-						return false;
-					} else {
-						no_channels_remain = false;
-					}
+					no_channels_remain = false;
 				}
-				true
-			})
-		}
+			}
+			true
+		});
 		pending_msg_events.retain(|msg| {
 			match msg {
 				&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
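Taken together, the hunk above means peer_disconnected no longer force-closes anything on its own, even when no_connection_possible is set. A sketch of the new caller-visible sequence, stitched together from this PR's own test changes below (the list_channels assert is illustrative):

	// Even with no_connection_possible = true, channels now survive the disconnect...
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
	assert_eq!(nodes[1].node.list_channels().len(), 1);
	// ...so force-closing becomes an explicit, separate step taken by the caller:
	nodes[1].node.force_close_channel(&chan_1.2).unwrap();
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);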
18 changes: 9 additions & 9 deletions lightning/src/ln/functional_tests.rs
@@ -2156,9 +2156,9 @@ fn channel_monitor_network_test() {
 	send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
 	// Simple case with no pending HTLCs:
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+	nodes[1].node.force_close_channel(&chan_1.2).unwrap();
 	check_added_monitors!(nodes[1], 1);
-	check_closed_broadcast!(nodes[1], false);
+	check_closed_broadcast!(nodes[1], true);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
 		assert_eq!(node_txn.len(), 1);
@@ -2170,15 +2170,15 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
Contributor:

FWIW, I think after this we'll be missing test coverage of ClosureReason::DisconnectedPeer.

Collaborator (Author):

Right, well this doesn't change coverage of it for the "peer disconnected before confirmation" reason, only the no_connection_possible reason, so it doesn't really change coverage.
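The "peer disconnected before confirmation" path the author mentions still produces ClosureReason::DisconnectedPeer. A hedged sketch of that remaining coverage, in the style of the surrounding tests (the elided handshake steps are illustrative and not part of this PR):

	// Open a channel but disconnect before funding completes: the spec says we must
	// forget the channel, surfacing ClosureReason::DisconnectedPeer, not a force-close.
	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
	// ... open_channel/accept_channel may be exchanged, but no funding transaction yet ...
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);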
 	// One pending HTLC is discarded by the force-close:
 	let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 
 	// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
 	// broadcasted until we reach the timelock time).
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
-	check_closed_broadcast!(nodes[1], false);
+	nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2191,7 +2191,7 @@ fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[2], true);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 	assert_eq!(nodes[2].node.list_channels().len(), 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
 	macro_rules! claim_funds {
@@ -2216,9 +2216,9 @@ fn channel_monitor_network_test() {
 
 	// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
 	// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+	nodes[2].node.force_close_channel(&chan_3.2).unwrap();
 	check_added_monitors!(nodes[2], 1);
-	check_closed_broadcast!(nodes[2], false);
+	check_closed_broadcast!(nodes[2], true);
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2235,7 +2235,7 @@ fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[3], true);
 	assert_eq!(nodes[2].node.list_channels().len(), 0);
 	assert_eq!(nodes[3].node.list_channels().len(), 1);
-	check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
 	check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
 	// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
9 changes: 7 additions & 2 deletions lightning/src/ln/peer_handler.rs
@@ -257,8 +257,13 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// descriptor.
 #[derive(Clone)]
 pub struct PeerHandleError {
-	/// Used to indicate that we probably can't make any future connections to this peer, implying
-	/// we should go ahead and force-close any channels we have with it.
+	/// Used to indicate that we probably can't make any future connections to this peer (e.g.
+	/// because we required features that our peer was missing, or vice versa).
+	///
+	/// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+	/// any channels with this peer or check for new versions of LDK.
+	///
+	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 	pub no_connection_possible: bool,
 }
 impl fmt::Debug for PeerHandleError {
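Because the handler no longer closes channels for you, an application that wants the old behavior when no_connection_possible is set has to implement it itself. A minimal application-side sketch, assuming channel_manager (your ChannelManager) and their_node_id (the peer's PublicKey) are in scope; LDK does not ship this helper:

	// React to a PeerHandleError returned from the PeerManager's read/write event methods.
	if peer_error.no_connection_possible {
		for chan in channel_manager.list_channels() {
			if chan.counterparty.node_id == their_node_id {
				// Broadcasts our latest commitment transaction and yields a
				// ChannelClosed event with ClosureReason::HolderForceClosed.
				let _ = channel_manager.force_close_channel(&chan.channel_id);
			}
		}
	}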
11 changes: 5 additions & 6 deletions lightning/src/util/events.rs
@@ -100,12 +100,11 @@ pub enum ClosureReason {
 		/// A developer-readable error message which we generated.
 		err: String,
 	},
-	/// The `PeerManager` informed us that we've disconnected from the peer. We close channels
-	/// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
-	/// peer again in the future or if the peer disconnected before we finished negotiating
-	/// the channel open. The first case may be caused by incompatible features which our
-	/// counterparty, or we, require.
-	//TODO: split between PeerUnconnectable/PeerDisconnected ?
+	/// The peer disconnected prior to funding completing. In this case the spec mandates that we
+	/// forget the channel entirely - we can attempt again if the peer reconnects.
+	///
+	/// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+	/// peer because of mutual incompatibility between us and our channel counterparty.
 	DisconnectedPeer,
 	/// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
 	/// the ChannelManager deserialized.
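Under the reworded docs, DisconnectedPeer is now strictly the pre-funding case, while a manual close (as suggested for PeerHandleError above) surfaces as HolderForceClosed. A sketch of telling the two apart in an Event::ChannelClosed handler (the handler itself is illustrative, not part of this PR):

	match event {
		Event::ChannelClosed { reason, .. } => match reason {
			ClosureReason::DisconnectedPeer => {
				// Forgotten before funding completed; safe to retry the open on reconnect.
			},
			ClosureReason::HolderForceClosed => {
				// We force-closed; our commitment transaction went to the broadcaster.
			},
			_ => {},
		},
		_ => {},
	}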