Persist ChannelMonitors after new blocks are connected #1108


Merged
26 changes: 17 additions & 9 deletions fuzz/src/chanmon_consistency.rs
@@ -855,22 +855,26 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {

0x08 => {
if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
},
0x09 => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0a => {
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
},
0x0b => {
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}
},

@@ -1071,22 +1075,26 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
// First make sure there are no pending monitor updates, resetting the error state
- // and calling channel_monitor_updated for each monitor.
+ // and calling force_channel_monitor_updated for each monitor.
*monitor_a.persister.update_ret.lock().unwrap() = Ok(());
*monitor_b.persister.update_ret.lock().unwrap() = Ok(());
*monitor_c.persister.update_ret.lock().unwrap() = Ok(());

if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[0].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
- nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[1].process_monitor_events();
}
if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
- nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+ nodes[2].process_monitor_events();
}

// Next, make sure peers are all connected to each other
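The pattern in each of these arms replaces the old `ChannelManager::channel_monitor_updated` call with two steps: the completed update is reported to the `ChainMonitor` (the fuzzer uses the test-only `force_channel_monitor_updated` helper), and `process_monitor_events` then lets the node act on the resulting monitor event. For context, here is a minimal sketch, not taken from this PR, of the persister side of that flow: an implementation that accepts writes asynchronously, records each in-flight `MonitorUpdateId`, and returns `TemporaryFailure` until the application later reports completion. The `AsyncQueuePersister` name and its `pending` queue are illustrative assumptions.

```rust
use std::sync::Mutex;

use lightning::chain;
use lightning::chain::chainmonitor::{self, MonitorUpdateId};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;

/// Accepts persistence requests, queues them for a background writer, and
/// reports TemporaryFailure so channel progress pauses until completion.
pub struct AsyncQueuePersister {
    // (channel, update) pairs accepted but not yet durably written.
    pub pending: Mutex<Vec<(OutPoint, MonitorUpdateId)>>,
}

impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for AsyncQueuePersister {
    fn persist_new_channel(&self, funding_txo: OutPoint, _monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        // A real implementation would hand the serialized monitor to a
        // background writer here; we only record the in-flight id.
        self.pending.lock().unwrap().push((funding_txo, update_id));
        Err(chain::ChannelMonitorUpdateErr::TemporaryFailure)
    }

    fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, _monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        self.pending.lock().unwrap().push((funding_txo, update_id));
        Err(chain::ChannelMonitorUpdateErr::TemporaryFailure)
    }
}
```

Once a queued write actually lands, the application would hand `(funding_txo, update_id)` back to the `ChainMonitor`, the production analogue of the fuzzer's `force_channel_monitor_updated` call above, and then let the node process the resulting monitor events.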
5 changes: 3 additions & 2 deletions fuzz/src/utils/test_persister.rs
@@ -1,5 +1,6 @@
use lightning::chain;
use lightning::chain::{chainmonitor, channelmonitor};
+ use lightning::chain::chainmonitor::MonitorUpdateId;
use lightning::chain::transaction::OutPoint;
use lightning::util::enforcing_trait_impls::EnforcingSigner;

@@ -9,11 +10,11 @@ pub struct TestPersister {
pub update_ret: Mutex<Result<(), chain::ChannelMonitorUpdateErr>>,
}
impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
self.update_ret.lock().unwrap().clone()
}

- fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
self.update_ret.lock().unwrap().clone()
}
}
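Two changes land in the `Persist` trait here: both methods now receive a `MonitorUpdateId` identifying the specific persistence request, and `update_persisted_channel` takes an `Option<ChannelMonitorUpdate>` instead of a bare update, presumably (in line with the PR title) because a monitor re-persisted after new blocks are connected has no off-chain update object to pass, so `None` is given. Below is a sketch of an implementation that branches on the new `Option`; this is my illustration, not LDK code, and the snapshot-plus-deltas layout is an assumption:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use lightning::chain;
use lightning::chain::chainmonitor::{self, MonitorUpdateId};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;
use lightning::util::ser::Writeable;

#[derive(Default)]
pub struct InMemoryPersister {
    // Per channel: the latest full snapshot plus deltas appended since.
    store: Mutex<HashMap<OutPoint, (Vec<u8>, Vec<Vec<u8>>)>>,
}

impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for InMemoryPersister {
    fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        self.store.lock().unwrap().insert(funding_txo, (monitor.encode(), Vec::new()));
        Ok(())
    }

    fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        let mut store = self.store.lock().unwrap();
        match update {
            // Off-chain update: append just the delta.
            Some(upd) => store.get_mut(&funding_txo)
                .ok_or(chain::ChannelMonitorUpdateErr::PermanentFailure)?
                .1.push(upd.encode()),
            // No update object, e.g. new chain data was connected: rewrite
            // the whole monitor and drop the now-redundant deltas.
            None => { store.insert(funding_txo, (monitor.encode(), Vec::new())); },
        }
        Ok(())
    }
}
```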
17 changes: 13 additions & 4 deletions lightning-persister/src/lib.rs
@@ -159,13 +159,18 @@ impl FilesystemPersister {
}

impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
- fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ // TODO: We really need a way for the persister to inform the user that its time to crash/shut
+ // down once these start returning failure.
+ // A PermanentFailure implies we need to shut down since we're force-closing channels without
+ // even broadcasting!
+
+ fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
util::write_to_file(self.path_to_monitor_data(), filename, monitor)
.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
}

- fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
+ fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
util::write_to_file(self.path_to_monitor_data(), filename, monitor)
.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
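The TODO added above deserves emphasis: a `PermanentFailure` return means channels are force-closed without even broadcasting the latest state, so the node really needs to shut down once one is seen. Until LDK grows a first-class signal for this, one possible stopgap, sketched here as a hypothetical wrapper of my own (the `FatalPersister` name and `fatal` flag are assumptions, not LDK API), is to latch the condition for the application's main loop to poll:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

use lightning::chain;
use lightning::chain::chainmonitor::{self, MonitorUpdateId};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;

/// Wraps any Persist implementation (e.g. the FilesystemPersister) and
/// latches a flag the application can poll to trigger an orderly shutdown.
pub struct FatalPersister<P> {
    pub inner: P,
    pub fatal: AtomicBool,
}

impl<ChannelSigner: Sign, P: chainmonitor::Persist<ChannelSigner>> chainmonitor::Persist<ChannelSigner> for FatalPersister<P> {
    fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        let res = self.inner.persist_new_channel(funding_txo, monitor, update_id);
        if let Err(chain::ChannelMonitorUpdateErr::PermanentFailure) = res {
            // Channels are now being force-closed without broadcasting;
            // the main loop should stop the node and alert the operator.
            self.fatal.store(true, Ordering::Release);
        }
        res
    }

    fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
        let res = self.inner.update_persisted_channel(funding_txo, update, monitor, update_id);
        if let Err(chain::ChannelMonitorUpdateErr::PermanentFailure) = res {
            self.fatal.store(true, Ordering::Release);
        }
        res
    }
}
```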
@@ -296,6 +301,8 @@ mod tests {
nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+ let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

// Set the persister's directory to read-only, which should result in
// returning a permanent failure when we then attempt to persist a
@@ -309,7 +316,7 @@
txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
index: 0
};
- match persister.persist_new_channel(test_txo, &added_monitors[0].1) {
+ match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
_ => panic!("unexpected result from persisting new channel")
}
@@ -333,6 +340,8 @@ mod tests {
nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+ let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

// Create the persister with an invalid directory name and test that the
// channel fails to open because the directories fail to be created. There
@@ -344,7 +353,7 @@
txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
index: 0
};
- match persister.persist_new_channel(test_txo, &added_monitors[0].1) {
+ match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
_ => panic!("unexpected result from persisting new channel")
}