Skip to content

Commit e39d63c

Browse files
committed
Do not force-close channels when we cannot communicate with peers
In general, we should never be automatically force-closing our users' channels unless there is some immediate risk of funds loss (i.e., because of some HTLC(s) which are timing out soon). In any other case, we should trust the user to be able to figure out what is going on and close their channels manually instead of trying to be overly clever and automate closures if we think the channel is useless. In this case, even if a peer has some required feature that does not allow us to communicate with them, there is a strong possibility that some LDK upgrade may allow us to in the future. In the meantime, there is no reason to go on-chain unless the user needs funds immediately. In such a case, the user should already have logic to force-close channels with peers which are not available for any reason.
1 parent 62edee5 commit e39d63c

File tree

4 files changed

+32
-46
lines changed

4 files changed

+32
-46
lines changed

lightning/src/ln/channelmanager.rs

+11-29
Original file line numberDiff line numberDiff line change
@@ -5699,39 +5699,21 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
56995699
let channel_state = &mut *channel_state_lock;
57005700
let pending_msg_events = &mut channel_state.pending_msg_events;
57015701
let short_to_id = &mut channel_state.short_to_id;
5702-
if no_connection_possible {
5703-
log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
5704-
channel_state.by_id.retain(|_, chan| {
5705-
if chan.get_counterparty_node_id() == *counterparty_node_id {
5702+
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
5703+
log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
5704+
channel_state.by_id.retain(|_, chan| {
5705+
if chan.get_counterparty_node_id() == *counterparty_node_id {
5706+
chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
5707+
if chan.is_shutdown() {
57065708
update_maps_on_chan_removal!(self, short_to_id, chan);
5707-
failed_channels.push(chan.force_shutdown(true));
5708-
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
5709-
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5710-
msg: update
5711-
});
5712-
}
57135709
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
5714-
false
5710+
return false;
57155711
} else {
5716-
true
5717-
}
5718-
});
5719-
} else {
5720-
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
5721-
channel_state.by_id.retain(|_, chan| {
5722-
if chan.get_counterparty_node_id() == *counterparty_node_id {
5723-
chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
5724-
if chan.is_shutdown() {
5725-
update_maps_on_chan_removal!(self, short_to_id, chan);
5726-
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
5727-
return false;
5728-
} else {
5729-
no_channels_remain = false;
5730-
}
5712+
no_channels_remain = false;
57315713
}
5732-
true
5733-
})
5734-
}
5714+
}
5715+
true
5716+
});
57355717
pending_msg_events.retain(|msg| {
57365718
match msg {
57375719
&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,

lightning/src/ln/functional_tests.rs

+9-9
Original file line numberDiff line numberDiff line change
@@ -2156,9 +2156,9 @@ fn channel_monitor_network_test() {
21562156
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
21572157

21582158
// Simple case with no pending HTLCs:
2159-
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
2159+
nodes[1].node.force_close_channel(&chan_1.2).unwrap();
21602160
check_added_monitors!(nodes[1], 1);
2161-
check_closed_broadcast!(nodes[1], false);
2161+
check_closed_broadcast!(nodes[1], true);
21622162
{
21632163
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
21642164
assert_eq!(node_txn.len(), 1);
@@ -2170,15 +2170,15 @@ fn channel_monitor_network_test() {
21702170
assert_eq!(nodes[0].node.list_channels().len(), 0);
21712171
assert_eq!(nodes[1].node.list_channels().len(), 1);
21722172
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
2173-
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
2173+
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
21742174

21752175
// One pending HTLC is discarded by the force-close:
21762176
let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
21772177

21782178
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
21792179
// broadcasted until we reach the timelock time).
2180-
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
2181-
check_closed_broadcast!(nodes[1], false);
2180+
nodes[1].node.force_close_channel(&chan_2.2).unwrap();
2181+
check_closed_broadcast!(nodes[1], true);
21822182
check_added_monitors!(nodes[1], 1);
21832183
{
21842184
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2191,7 +2191,7 @@ fn channel_monitor_network_test() {
21912191
check_closed_broadcast!(nodes[2], true);
21922192
assert_eq!(nodes[1].node.list_channels().len(), 0);
21932193
assert_eq!(nodes[2].node.list_channels().len(), 1);
2194-
check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
2194+
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
21952195
check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
21962196

21972197
macro_rules! claim_funds {
@@ -2216,9 +2216,9 @@ fn channel_monitor_network_test() {
22162216

22172217
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
22182218
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2219-
nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
2219+
nodes[2].node.force_close_channel(&chan_3.2).unwrap();
22202220
check_added_monitors!(nodes[2], 1);
2221-
check_closed_broadcast!(nodes[2], false);
2221+
check_closed_broadcast!(nodes[2], true);
22222222
let node2_commitment_txid;
22232223
{
22242224
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2235,7 +2235,7 @@ fn channel_monitor_network_test() {
22352235
check_closed_broadcast!(nodes[3], true);
22362236
assert_eq!(nodes[2].node.list_channels().len(), 0);
22372237
assert_eq!(nodes[3].node.list_channels().len(), 1);
2238-
check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
2238+
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
22392239
check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
22402240

22412241
// Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and

lightning/src/ln/peer_handler.rs

+7-2
Original file line numberDiff line numberDiff line change
@@ -257,8 +257,13 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
257257
/// descriptor.
258258
#[derive(Clone)]
259259
pub struct PeerHandleError {
260-
/// Used to indicate that we probably can't make any future connections to this peer, implying
261-
/// we should go ahead and force-close any channels we have with it.
260+
/// Used to indicate that we probably can't make any future connections to this peer (e.g.
261+
/// because we required features that our peer was missing, or vice versa).
262+
///
263+
/// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
264+
/// any channels with this peer or check for new versions of LDK.
265+
///
266+
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
262267
pub no_connection_possible: bool,
263268
}
264269
impl fmt::Debug for PeerHandleError {

lightning/src/util/events.rs

+5-6
Original file line numberDiff line numberDiff line change
@@ -100,12 +100,11 @@ pub enum ClosureReason {
100100
/// A developer-readable error message which we generated.
101101
err: String,
102102
},
103-
/// The `PeerManager` informed us that we've disconnected from the peer. We close channels
104-
/// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
105-
/// peer again in the future or if the peer disconnected before we finished negotiating
106-
/// the channel open. The first case may be caused by incompatible features which our
107-
/// counterparty, or we, require.
108-
//TODO: split between PeerUnconnectable/PeerDisconnected ?
103+
/// The peer disconnected prior to funding completing. In this case the spec mandates that we
104+
/// forget the channel entirely - we can attempt again if the peer reconnects.
105+
///
106+
/// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
107+
/// peer because of mutual incompatibility between us and our channel counterparty.
109108
DisconnectedPeer,
110109
/// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
111110
/// the ChannelManager deserialized.

0 commit comments

Comments
 (0)