Skip to content

Commit f73999d

Browse files
committed
Update test_dup_htlc_onchain_fails_on_reload for new persist API
ChannelMonitors now require that they be re-persisted before MonitorEvents are provided to the ChannelManager — the exact thing that test_dup_htlc_onchain_fails_on_reload was testing for when it *didn't* happen. As such, test_dup_htlc_onchain_fails_on_reload is now testing that we behave correctly when the API guarantees are not met, something we don't need to do. Here, we adapt it to test the new API requirements through ChainMonitor's calls to the Persist trait instead.
1 parent 0d62234 commit f73999d

File tree

2 files changed

+62
-26
lines changed

2 files changed

+62
-26
lines changed

lightning/src/ln/functional_tests.rs

+54-24
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
//! claim outputs on-chain.
1313
1414
use chain;
15-
use chain::{Confirm, Listen, Watch};
15+
use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
1616
use chain::channelmonitor;
1717
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
1818
use chain::transaction::OutPoint;
@@ -4190,21 +4190,15 @@ fn mpp_failure() {
41904190
fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
41914191
}
41924192

4193-
#[test]
4194-
fn test_dup_htlc_onchain_fails_on_reload() {
4193+
fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
41954194
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
41964195
// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
41974196
// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
41984197
// the ChannelMonitor tells it to.
41994198
//
4200-
// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
4201-
// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
4202-
// PaymentPathFailed event appearing). However, because we may not serialize the relevant
4203-
// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
4204-
// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
4205-
// and de-duplicates ChannelMonitor events.
4206-
//
4207-
// This tests that explicit tracking behavior.
4199+
// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
4200+
ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
4201+
// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
42084202
let chanmon_cfgs = create_chanmon_cfgs(2);
42094203
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
42104204
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4213,7 +4207,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42134207
let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
42144208
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
42154209

4216-
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
4210+
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
42174211

42184212
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
42194213
// nodes[0].
@@ -4231,35 +4225,59 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42314225
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
42324226
assert_eq!(node_txn.len(), 3);
42334227
assert_eq!(node_txn[0], node_txn[1]);
4228+
check_spends!(node_txn[1], funding_tx);
4229+
check_spends!(node_txn[2], node_txn[1]);
42344230

42354231
assert!(nodes[1].node.claim_funds(payment_preimage));
42364232
check_added_monitors!(nodes[1], 1);
42374233

42384234
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4239-
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4235+
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
42404236
check_closed_broadcast!(nodes[1], true);
42414237
check_added_monitors!(nodes[1], 1);
42424238
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
42434239
let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
42444240

42454241
header.prev_blockhash = nodes[0].best_block_hash();
4246-
connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4242+
connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
42474243

4248-
// Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
4249-
// fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
4250-
// happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
4251-
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4252-
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
4244+
// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
4245+
// returning TemporaryFailure. This should cause the claim event to never make its way to the
4246+
// ChannelManager.
4247+
chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap().clear();
4248+
chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
42534249

42544250
header.prev_blockhash = nodes[0].best_block_hash();
4255-
let claim_block = Block { header, txdata: claim_txn};
4251+
let claim_block = Block { header, txdata: claim_txn };
42564252
connect_block(&nodes[0], &claim_block);
4257-
expect_payment_sent!(nodes[0], payment_preimage);
42584253

4259-
// ChannelManagers generally get re-serialized after any relevant event(s). Since we just
4260-
// connected a highly-relevant block, it likely gets serialized out now.
4254+
let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
4255+
let mon_updates: Vec<_> = chanmon_cfgs[0].persister.non_update_monitor_persistences.lock().unwrap()
4256+
.get_mut(&funding_txo).unwrap().drain().collect();
4257+
assert_eq!(mon_updates.len(), 1);
4258+
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
4259+
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4260+
4261+
// If we persist the ChannelManager here, we should get the PaymentSent event after
4262+
// deserialization.
42614263
let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
4262-
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4264+
if !persist_manager_post_event {
4265+
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4266+
}
4267+
4268+
// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
4269+
// payment sent event.
4270+
chanmon_cfgs[0].persister.set_update_ret(Ok(()));
4271+
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4272+
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
4273+
nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]);
4274+
expect_payment_sent!(nodes[0], payment_preimage);
4275+
4276+
// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
4277+
// twice.
4278+
if persist_manager_post_event {
4279+
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4280+
}
42634281

42644282
// Now reload nodes[0]...
42654283
persister = test_utils::TestPersister::new();
@@ -4291,6 +4309,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42914309
check_added_monitors!(nodes[0], 1);
42924310
nodes[0].node = &nodes_0_deserialized;
42934311

4312+
if persist_manager_post_event {
4313+
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4314+
} else {
4315+
expect_payment_sent!(nodes[0], payment_preimage);
4316+
}
4317+
42944318
// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
42954319
// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
42964320
// payment events should kick in, leaving us with no pending events here.
@@ -4299,6 +4323,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42994323
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
43004324
}
43014325

4326+
#[test]
4327+
fn test_dup_htlc_onchain_fails_on_reload() {
4328+
do_test_dup_htlc_onchain_fails_on_reload(true);
4329+
do_test_dup_htlc_onchain_fails_on_reload(false);
4330+
}
4331+
43024332
#[test]
43034333
fn test_manager_serialize_deserialize_events() {
43044334
// This test makes sure the events field in ChannelManager survives de/serialization

lightning/src/util/test_utils.rs

+8-2
Original file line numberDiff line numberDiff line change
@@ -165,13 +165,16 @@ pub struct TestPersister {
165165
/// If this is set to Some(), after the next return, we'll always return this until update_ret
166166
/// is changed:
167167
pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
168-
168+
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
169+
/// MonitorUpdateId here.
170+
pub non_update_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
169171
}
170172
impl TestPersister {
171173
pub fn new() -> Self {
172174
Self {
173175
update_ret: Mutex::new(Ok(())),
174176
next_update_ret: Mutex::new(None),
177+
non_update_monitor_persistences: Mutex::new(HashMap::new()),
175178
}
176179
}
177180

@@ -188,11 +191,14 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
188191
ret
189192
}
190193

191-
fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
194+
fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
192195
let ret = self.update_ret.lock().unwrap().clone();
193196
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
194197
*self.update_ret.lock().unwrap() = next_ret;
195198
}
199+
if update.is_none() {
200+
self.non_update_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
201+
}
196202
ret
197203
}
198204
}

0 commit comments

Comments
 (0)