Commit 374e622

Update test_dup_htlc_onchain_fails_on_reload for new persist API
ChannelMonitors now require that they be re-persisted before MonitorEvents are provided to the ChannelManager, the exact thing that test_dup_htlc_onchain_fails_on_reload was testing for when it *didn't* happen. As such, test_dup_htlc_onchain_fails_on_reload is now testing that we behave correctly when the API guarantees are not met, something we don't need to do. Here, we adapt it to test the new API requirements through ChainMonitor's calls to the Persist trait instead.
1 parent cf55b97 commit 374e622
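
For context, the asynchronous persistence flow implied by the new API can be sketched as follows. This is an illustration, not code from the commit: the `update_persisted_channel` signature mirrors the one changed in test_utils.rs below, while `persist_new_channel` taking the same `MonitorUpdateId` parameter, and the `AsyncStore` helper, are assumptions. Returning `TemporaryFailure` makes the ChainMonitor hold back MonitorEvents for that channel until `ChainMonitor::channel_monitor_updated` is called with the matching `MonitorUpdateId`, which is exactly the behavior the updated test exercises.

```rust
// Sketch only, not part of this commit. update_persisted_channel matches the
// signature in test_utils.rs below; persist_new_channel carrying the same
// MonitorUpdateId parameter is an assumption, and AsyncStore is hypothetical.
use std::sync::Mutex;

use lightning::chain::ChannelMonitorUpdateErr;
use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;

/// Hypothetical background store: a real implementation would also hand the
/// serialized monitor bytes to a writer thread and, once each write is durable,
/// call ChainMonitor::channel_monitor_updated(funding_txo, update_id) so the
/// ChainMonitor releases the MonitorEvents it has been holding back.
struct AsyncStore {
	pending: Mutex<Vec<(OutPoint, MonitorUpdateId)>>,
}

struct AsyncPersister {
	store: AsyncStore,
}

impl<Signer: Sign> Persist<Signer> for AsyncPersister {
	fn persist_new_channel(&self, funding_txo: OutPoint, _monitor: &ChannelMonitor<Signer>,
		update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>
	{
		// Queue the initial monitor write; completion is reported asynchronously.
		self.store.pending.lock().unwrap().push((funding_txo, update_id));
		Err(ChannelMonitorUpdateErr::TemporaryFailure)
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint,
		_update: &Option<ChannelMonitorUpdate>, _monitor: &ChannelMonitor<Signer>,
		update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>
	{
		// Returning TemporaryFailure tells the ChainMonitor to hold back any
		// MonitorEvents for this channel until the write is reported durable
		// via channel_monitor_updated().
		self.store.pending.lock().unwrap().push((funding_txo, update_id));
		Err(ChannelMonitorUpdateErr::TemporaryFailure)
	}
}
```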

File tree

2 files changed (+57 -23 lines)


lightning/src/ln/functional_tests.rs (+49 -21)
```diff
@@ -12,7 +12,7 @@
 //! claim outputs on-chain.
 
 use chain;
-use chain::{Confirm, Listen, Watch};
+use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
@@ -4190,21 +4190,15 @@ fn mpp_failure() {
 	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 }
 
-#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
 	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
 	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
 	// the ChannelMonitor tells it to.
 	//
-	// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
-	// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
-	// PaymentPathFailed event appearing). However, because we may not serialize the relevant
-	// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
-	// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
-	// and de-duplicates ChannelMonitor events.
-	//
-	// This tests that explicit tracking behavior.
+	// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
+	// ChannelManaver the HTLC event until after the monitor is re-persisted. This should prevent a
+	// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4213,7 +4207,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
 	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
 	// nodes[0].
@@ -4245,21 +4239,43 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	header.prev_blockhash = nodes[0].best_block_hash();
 	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
 
-	// Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
-	// fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
-	// happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
-	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
+	// returning TemporaryFailure. This should cause the claim event to never make its way to the
+	// ChannelManager.
+	chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap().clear();
+	chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
 	header.prev_blockhash = nodes[0].best_block_hash();
 	let claim_block = Block { header, txdata: claim_txn};
 	connect_block(&nodes[0], &claim_block);
-	expect_payment_sent!(nodes[0], payment_preimage);
 
-	// ChannelManagers generally get re-serialized after any relevant event(s). Since we just
-	// connected a highly-relevant block, it likely gets serialized out now.
+	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
+		.get_mut(&funding_txo).unwrap().drain().collect();
+	assert_eq!(mon_updates.len(), 1);
+	assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+	// If we persist the ChannelManager here, we should get the PaymentSent event after
+	// deserialization.
 	let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
-	nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	if !persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
+
+	// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
+	// payment sent event.
+	chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
+	expect_payment_sent!(nodes[0], payment_preimage);
+
+	// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
+	// twice.
+	if persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
 
 	// Now reload nodes[0]...
 	persister = test_utils::TestPersister::new();
@@ -4291,6 +4307,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	check_added_monitors!(nodes[0], 1);
 	nodes[0].node = &nodes_0_deserialized;
 
+	if persist_manager_post_event {
+		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+	} else {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+
 	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
 	// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
 	// payment events should kick in, leaving us with no pending events here.
@@ -4299,6 +4321,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 }
 
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
+	do_test_dup_htlc_onchain_fails_on_reload(true);
+	do_test_dup_htlc_onchain_fails_on_reload(false);
+}
+
 #[test]
 fn test_manager_serialize_deserialize_events() {
 	// This test makes sure the events field in ChannelManager survives de/serialization
```

lightning/src/util/test_utils.rs (+8 -2)
```diff
@@ -165,13 +165,16 @@ pub struct TestPersister {
 	/// If this is set to Some(), after the next return, we'll always return this until update_ret
 	/// is changed:
 	pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
-
+	/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
+	/// MonitorUpdateId here.
+	pub non_update_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
 }
 impl TestPersister {
 	pub fn new() -> Self {
 		Self {
 			update_ret: Mutex::new(Ok(())),
 			next_update_ret: Mutex::new(None),
+			non_update_monitor_persistences: Mutex::new(HashMap::new()),
 		}
 	}
 
@@ -188,11 +191,14 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
 		ret
 	}
 
-	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
 		let ret = self.update_ret.lock().unwrap().clone();
 		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
 			*self.update_ret.lock().unwrap() = next_ret;
 		}
+		if update.is_none() {
+			self.non_update_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+		}
 		ret
 	}
 }
```
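
For reference, the updated functional test exercises these new TestPersister hooks roughly as follows. This is condensed from the functional_tests.rs diff above; `nodes`, `chanmon_cfgs`, `claim_block`, `funding_txo`, and `payment_preimage` are bindings from that test.

```rust
// Condensed from the functional_tests.rs hunk above; test-harness bindings assumed.
// Force the chain-sync persistence to report TemporaryFailure so the claim's
// MonitorEvent is held back from the ChannelManager.
chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
connect_block(&nodes[0], &claim_block);

// The non-update persistence was recorded, but no event has been released yet.
let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
	.get_mut(&funding_txo).unwrap().drain().collect();
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

// Once the monitor write is durable, complete it; only now does the PaymentSent
// event reach the ChannelManager.
chanmon_cfgs[0].persister.set_update_ret(Ok(()));
nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
expect_payment_sent!(nodes[0], payment_preimage);
```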
