Skip to content

Commit e9d3033

Browse files
committed
Merge pull request #679 from ariard/2020-08-concurrent-watchtowers
Implement concurrent broadcast tolerance for distributed watchtowers
2 parents 26ee7e6 + 6622ea7 commit e9d3033

File tree

3 files changed

+139
-31
lines changed

3 files changed

+139
-31
lines changed

lightning/src/ln/channelmonitor.rs

+26-21
Original file line numberDiff line numberDiff line change
@@ -133,11 +133,19 @@ pub enum ChannelMonitorUpdateErr {
133133
TemporaryFailure,
134134
/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
135135
/// different watchtower and cannot update with all watchtowers that were previously informed
136-
/// of this channel). This will force-close the channel in question (which will generate one
137-
/// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy).
136+
/// of this channel).
138137
///
139-
/// Should also be used to indicate a failure to update the local persisted copy of the channel
140-
/// monitor.
138+
/// At reception of this error, ChannelManager will force-close the channel and return at
139+
/// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
140+
/// least one ChannelMonitor copy. The revocation secret MUST NOT be released and the offchain channel
141+
/// update must be rejected.
142+
///
143+
/// This failure may also signal a failure to update the local persisted copy of one of
144+
/// the channel monitor instances.
145+
///
146+
/// Note that even when you fail a holder commitment transaction update, you must store the
147+
/// update to ensure you can claim from it in case of a duplicate copy of this ChannelMonitor
148+
/// broadcasts it (e.g. distributed channel-monitor deployment)
141149
PermanentFailure,
142150
}
143151

@@ -824,6 +832,10 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
824832
// Set once we've signed a holder commitment transaction and handed it over to our
825833
// OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
826834
// may occur, and we fail any such monitor updates.
835+
//
836+
// In case of update rejection due to a locally already signed commitment transaction, we
837+
// nevertheless store update content to track in case of concurrent broadcast by another
838+
// remote monitor out-of-order with regards to the block view.
827839
holder_tx_signed: bool,
828840

829841
// We simply modify last_block_hash in Channel's block_connected so that serialization is
@@ -888,6 +900,11 @@ pub trait ManyChannelMonitor: Send + Sync {
888900
///
889901
/// Any spends of outputs which should have been registered which aren't passed to
890902
/// ChannelMonitors via block_connected may result in FUNDS LOSS.
903+
///
904+
/// In case of a distributed watchtower deployment, even if an Err is returned, the new version
905+
/// must be written to disk, as state may have been stored but rejected due to a block forcing
906+
/// a commitment broadcast. This storage is used to claim outputs of rejected state confirmed
907+
/// onchain by another watchtower, lagging behind on block processing.
891908
fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
892909

893910
/// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
@@ -1167,12 +1184,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
11671184
feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw,
11681185
htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions
11691186
};
1170-
// Returning a monitor error before updating tracking points means in case of using
1171-
// a concurrent watchtower implementation for same channel, if this one doesn't
1172-
// reject update as we do, you MAY have the latest holder valid commitment tx onchain
1173-
// for which you want to spend outputs. We're NOT robust again this scenario right
1174-
// now but we should consider it later.
1175-
onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx).unwrap();
1187+
onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
11761188

11771189
ChannelMonitor {
11781190
latest_update_id: 0,
@@ -1327,9 +1339,6 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
13271339
/// up-to-date as our holder commitment transaction is updated.
13281340
/// Panics if set_on_holder_tx_csv has never been called.
13291341
pub(super) fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
1330-
if self.holder_tx_signed {
1331-
return Err(MonitorUpdateError("A holder commitment tx has already been signed, no new holder commitment txn can be sent to our counterparty"));
1332-
}
13331342
let txid = commitment_tx.txid();
13341343
let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
13351344
let locktime = commitment_tx.unsigned_tx.lock_time as u64;
@@ -1343,17 +1352,13 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
13431352
feerate_per_kw: commitment_tx.feerate_per_kw,
13441353
htlc_outputs: htlc_outputs,
13451354
};
1346-
// Returning a monitor error before updating tracking points means in case of using
1347-
// a concurrent watchtower implementation for same channel, if this one doesn't
1348-
// reject update as we do, you MAY have the latest holder valid commitment tx onchain
1349-
// for which you want to spend outputs. We're NOT robust again this scenario right
1350-
// now but we should consider it later.
1351-
if let Err(_) = self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx) {
1352-
return Err(MonitorUpdateError("Holder commitment signed has already been signed, no further update of LOCAL commitment transaction is allowed"));
1353-
}
1355+
self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
13541356
self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
13551357
mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
13561358
self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
1359+
if self.holder_tx_signed {
1360+
return Err(MonitorUpdateError("Latest holder commitment signed has already been signed, update is rejected"));
1361+
}
13571362
Ok(())
13581363
}
13591364

lightning/src/ln/functional_tests.rs

+112
Original file line numberDiff line numberDiff line change
@@ -8735,3 +8735,115 @@ fn test_update_err_monitor_lockdown() {
87358735
let events = nodes[0].node.get_and_clear_pending_events();
87368736
assert_eq!(events.len(), 1);
87378737
}
8738+
8739+
#[test]
8740+
fn test_concurrent_monitor_claim() {
8741+
// Watchtower A receives block, broadcasts state N, then channel receives new state N+1,
8742+
// sending it to both watchtowers, Bob accepts N+1, then receives block and broadcasts
8743+
// the latest state N+1, Alice rejects state N+1, but Bob has already broadcast it,
8744+
// state N+1 confirms. Alice claims output from state N+1.
8745+
8746+
let chanmon_cfgs = create_chanmon_cfgs(2);
8747+
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8748+
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8749+
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8750+
8751+
// Create some initial channel
8752+
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
8753+
let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8754+
8755+
// Rebalance the network to generate htlc in the two directions
8756+
send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000, 10_000_000);
8757+
8758+
// Route a HTLC from node 0 to node 1 (but don't settle)
8759+
route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8760+
8761+
// Copy SimpleManyChannelMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain
8762+
let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8763+
let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
8764+
let watchtower_alice = {
8765+
let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
8766+
let monitor = monitors.get(&outpoint).unwrap();
8767+
let mut w = test_utils::TestVecWriter(Vec::new());
8768+
monitor.write_for_disk(&mut w).unwrap();
8769+
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
8770+
&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
8771+
assert!(new_monitor == *monitor);
8772+
let watchtower = test_utils::TestChannelMonitor::new(&chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
8773+
assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok());
8774+
watchtower
8775+
};
8776+
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8777+
watchtower_alice.simple_monitor.block_connected(&header, 135, &vec![], &vec![]);
8778+
8779+
// Watchtower Alice should have broadcast a commitment/HTLC-timeout
8780+
{
8781+
let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8782+
assert_eq!(txn.len(), 2);
8783+
txn.clear();
8784+
}
8785+
8786+
// Copy SimpleManyChannelMonitor to simulate watchtower Bob and make it receive a commitment update first.
8787+
let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8788+
let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
8789+
let watchtower_bob = {
8790+
let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
8791+
let monitor = monitors.get(&outpoint).unwrap();
8792+
let mut w = test_utils::TestVecWriter(Vec::new());
8793+
monitor.write_for_disk(&mut w).unwrap();
8794+
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
8795+
&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
8796+
assert!(new_monitor == *monitor);
8797+
let watchtower = test_utils::TestChannelMonitor::new(&chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
8798+
assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok());
8799+
watchtower
8800+
};
8801+
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8802+
watchtower_bob.simple_monitor.block_connected(&header, 134, &vec![], &vec![]);
8803+
8804+
// Route another payment to generate another update with still previous HTLC pending
8805+
let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
8806+
{
8807+
let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
8808+
let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), None, &Vec::new(), 3000000 , TEST_FINAL_CLTV, &logger).unwrap();
8809+
nodes[1].node.send_payment(&route, payment_hash, &None).unwrap();
8810+
}
8811+
check_added_monitors!(nodes[1], 1);
8812+
8813+
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8814+
assert_eq!(updates.update_add_htlcs.len(), 1);
8815+
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8816+
if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
8817+
if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
8818+
// Watchtower Alice should already have seen the block and reject the update
8819+
if let Err(_) = watchtower_alice.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); }
8820+
if let Ok(_) = watchtower_bob.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); }
8821+
if let Ok(_) = nodes[0].chan_monitor.update_monitor(outpoint, update) {} else { assert!(false); }
8822+
} else { assert!(false); }
8823+
} else { assert!(false); };
8824+
// Our local monitor is in-sync and hasn't processed yet timeout
8825+
check_added_monitors!(nodes[0], 1);
8826+
8827+
// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8828+
watchtower_bob.simple_monitor.block_connected(&header, 135, &vec![], &vec![]);
8829+
8830+
// Watchtower Bob should have broadcast a commitment/HTLC-timeout
8831+
let bob_state_y;
8832+
{
8833+
let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8834+
assert_eq!(txn.len(), 2);
8835+
bob_state_y = txn[0].clone();
8836+
txn.clear();
8837+
};
8838+
8839+
// We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
8840+
watchtower_alice.simple_monitor.block_connected(&header, 136, &vec![&bob_state_y][..], &vec![]);
8841+
{
8842+
let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8843+
// We broadcast twice the transaction, once due to the HTLC-timeout, once due
8844+
// the onchain detection of the HTLC output
8845+
assert_eq!(htlc_txn.len(), 2);
8846+
check_spends!(htlc_txn[0], bob_state_y);
8847+
check_spends!(htlc_txn[1], bob_state_y);
8848+
}
8849+
}

lightning/src/ln/onchaintx.rs

+1-10
Original file line numberDiff line numberDiff line change
@@ -877,18 +877,9 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
877877
}
878878
}
879879

880-
pub(super) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) -> Result<(), ()> {
881-
// To prevent any unsafe state discrepancy between offchain and onchain, once holder
882-
// commitment transaction has been signed due to an event (either block height for
883-
// HTLC-timeout or channel force-closure), don't allow any further update of holder
884-
// commitment transaction view to avoid delivery of revocation secret to counterparty
885-
// for the aformentionned signed transaction.
886-
if self.holder_htlc_sigs.is_some() || self.prev_holder_htlc_sigs.is_some() {
887-
return Err(());
888-
}
880+
pub(super) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
889881
self.prev_holder_commitment = self.holder_commitment.take();
890882
self.holder_commitment = Some(tx);
891-
Ok(())
892883
}
893884

894885
fn sign_latest_holder_htlcs(&mut self) {

0 commit comments

Comments
 (0)