From 60fad626331d871d83f560e371a41488636aa37a Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Sun, 12 Jan 2020 18:04:40 -0500
Subject: [PATCH 1/2] Fix EnforcingChannelKeys panic when our counterparty
 burns their $.

If our counterparty burns their funds by revoking their current
commitment transaction before we've sent them a new one, we'll step
forward the remote commitment number. This would otherwise be fine (and
may even encourage them to broadcast their revoked state(s) on chain),
except that our new EnforcingChannelKeys expects us not to jump forward
in time. Since it isn't too important that we punish our counterparty in
such a corner case, we opt to just close the channel and move on.
---
 lightning/src/ln/channel.rs | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index c86136d6070..ec2071428eb 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1995,6 +1995,17 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 		self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
 			.map_err(|e| ChannelError::Close(e.0))?;
 
+		if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+			// Our counterparty seems to have burned their coins to us (by revoking a state when we
+			// haven't given them a new commitment transaction to broadcast). We should probably
+			// take advantage of this by updating our channel monitor, sending them an error, and
+			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
+			// lot of work, and there's some chance this is all a misunderstanding anyway.
+			// We have to do *something*, though, since our signer may get mad at us for otherwise
+			// jumping a remote commitment number, so best to just force-close and move on.
+			return Err(ChannelError::Close("Received an unexpected revoke_and_ack"));
+		}
+
 		// Update state now that we've passed all the can-fail calls...
 		// (note that we may still fail to generate the new commitment_signed message, but that's
 		// OK, we step the channel here and *then* if the new generation fails we can fail the
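For context on the guard above: AwaitingRemoteRevoke is a bit in Channel::channel_state
that is set while we have a commitment_signed outstanding and are waiting on the
counterparty's revoke_and_ack. If an RAA arrives while that bit is clear, there is no
commitment update it could legitimately be responding to. Below is a minimal standalone
sketch of the same bitflag test, with hypothetical flag names and bit values chosen only
for illustration; it is not rust-lightning's actual ChannelState, which defines many more
states:

	// Standalone sketch: hypothetical flag bits, mirroring the guard added above.
	#[derive(Copy, Clone)]
	enum ChannelState {
		ChannelFunded = 1 << 0,        // hypothetical bit value
		AwaitingRemoteRevoke = 1 << 1, // hypothetical bit value
	}

	// Mirrors `self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0`:
	// if the flag is clear, we never sent a commitment_signed, so there is nothing a
	// revoke_and_ack could be responding to.
	fn raa_is_expected(channel_state: u32) -> bool {
		channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0
	}

	fn main() {
		let mut state = ChannelState::ChannelFunded as u32;
		assert!(!raa_is_expected(state)); // unexpected RAA: the new force-close path

		state |= ChannelState::AwaitingRemoteRevoke as u32;
		assert!(raa_is_expected(state)); // RAA during a normal commitment dance
	}

The real check relies on the same operator precedence: `as` binds tighter than `&`, which
binds tighter than `==`, so no extra parentheses are needed.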
From 1443509d77a1ee0edc16dbf1ff962861a1a5e1de Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 16 Jan 2020 18:53:32 -0500
Subject: [PATCH 2/2] Test that EnforcingChannelKeys doesn't panic on
 duplicate RAAs

---
 lightning/src/ln/channel.rs          |  3 +++
 lightning/src/ln/functional_tests.rs | 28 +++++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index ec2071428eb..231ee832bef 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -240,7 +240,10 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
 	secp_ctx: Secp256k1<secp256k1::All>,
 	channel_value_satoshis: u64,
 
+	#[cfg(not(test))]
 	local_keys: ChanSigner,
+	#[cfg(test)]
+	pub(super) local_keys: ChanSigner,
 	shutdown_pubkey: PublicKey,
 
 	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index ddd5f56b199..4006c87a684 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -9,7 +9,7 @@ use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
 use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
 use ln::channel::{Channel, ChannelError};
-use ln::onion_utils;
+use ln::{chan_utils, onion_utils};
 use ln::router::{Route, RouteHop};
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::msgs;
@@ -6972,6 +6972,32 @@ fn test_set_outpoints_partial_claiming() {
 	}
 }
 
+#[test]
+fn test_counterparty_raa_skip_no_crash() {
+	// Previously, if our counterparty sent two RAAs in a row without us having provided a
+	// commitment transaction, we would have happily carried on and provided them the next
+	// commitment transaction based on one RAA forward. This would probably eventually have
+	// led to channel closure, but it would not have resulted in funds loss. Still, our
+	// EnforcingChannelKeys would have panicked, as it doesn't like jumps into the future.
+	// Here, we simply check that the channel is closed in response to such an RAA, but
+	// don't check whether we decide to punish our counterparty for revoking their funds
+	// (as we don't currently implement that).
+	let node_cfgs = create_node_cfgs(2);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
+
+	let commitment_seed = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&channel_id).unwrap().local_keys.commitment_seed().clone();
+	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+	let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+		&SecretKey::from_slice(&chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+	let per_commitment_secret = chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
+		&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
+	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
+}
+
 #[test]
 fn test_bump_txn_sanitize_tracking_maps() {
 	// Sanitizing pending_claim_request and claimable_outpoints used to be buggy,
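For context on the forged RAA in the test above: chan_utils::build_commitment_secret
implements BOLT 3's per-commitment secret derivation, which is what lets the test derive
node A's secrets straight from its commitment seed. A rough self-contained sketch of that
derivation follows, assuming the external sha2 crate in place of the hash wrappers
rust-lightning actually uses:

	use sha2::{Digest, Sha256};

	// Sketch of the BOLT 3 per-commitment secret derivation: walk the 48 index
	// bits from high to low and, for each set bit, flip that bit in the running
	// value and hash the result.
	fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
		let mut res = *commitment_seed;
		for i in 0..48 {
			let bitpos = 47 - i;
			if idx & (1 << bitpos) != 0 {
				res[bitpos / 8] ^= 1 << (bitpos & 7);
				res = Sha256::digest(&res).into();
			}
		}
		res
	}

	fn main() {
		// Mirror the test: derive the first secret the counterparty would reveal,
		// plus the "two states ahead" secret whose point the forged RAA advertises.
		const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
		let seed = [42u8; 32]; // arbitrary stand-in for a node's commitment seed
		let first = build_commitment_secret(&seed, INITIAL_COMMITMENT_NUMBER);
		let ahead = build_commitment_secret(&seed, INITIAL_COMMITMENT_NUMBER - 2);
		assert_ne!(first, ahead);
	}

Note the countdown: as the struct comment in channel.rs says, commitment numbers start at
2^48 - 1 and count down, so "two states ahead" means INITIAL_COMMITMENT_NUMBER - 2,
matching the next_per_commitment_point the test hands node B.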