From 516e5e6f58098115c73ff39238caab0c527db0af Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Tue, 18 Jun 2024 14:26:25 -0700 Subject: [PATCH 1/6] Split HolderCommitmentPoint::advance off into separate function --- lightning/src/ln/channel.rs | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index fc75b2ff5c6..ccba85c60c1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -992,6 +992,34 @@ impl HolderCommitmentPoint { } } + /// If we are pending the next commitment point, this method tries asking the signer again, + /// and transitions to the next state if successful. + /// + /// This method is used for the following transitions: + /// - `PendingNext` -> `Available` + pub fn try_resolve_pending(&mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L) + where SP::Target: SignerProvider, L::Target: Logger + { + if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self { + let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx); + log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); + *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + } + } + + /// If we are not pending the next commitment point, this method advances the commitment number + /// and requests the next commitment point from the signer. + /// + /// If our signer is not ready to provide the next commitment point, we will + /// only advance to `PendingNext`, and should be tried again later in `signer_unblocked` + /// via `try_resolve_pending`. + /// + /// If our signer is ready to provide the next commitment point, we will advance all the + /// way to `Available`. + /// + /// This method is used for the following transitions: + /// - `Available` -> `PendingNext` + /// - `Available` -> `PendingNext` -> `Available` (in one fell swoop) pub fn advance(&mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L) where SP::Target: SignerProvider, L::Target: Logger { @@ -1000,12 +1028,7 @@ impl HolderCommitmentPoint { transaction_number: *transaction_number - 1, current: *next, }; - } - - if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self { - let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx); - log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); - *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + self.try_resolve_pending(signer, secp_ctx, logger); } } } From 9c2a050a35831565e1be895b0967076dbf9e83f8 Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Tue, 18 Jun 2024 14:48:23 -0700 Subject: [PATCH 2/6] Return an error if we fail to advance our commitment number --- lightning/src/ln/channel.rs | 41 ++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ccba85c60c1..3f7d01bb891 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1008,7 +1008,9 @@ impl HolderCommitmentPoint { } /// If we are not pending the next commitment point, this method advances the commitment number - /// and requests the next commitment point from the signer. + /// and requests the next commitment point from the signer. 
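As a reference for the split above, here is a minimal, self-contained sketch of the two-state machine these first two patches build: `advance` performs `Available` -> `PendingNext` and then calls `try_resolve_pending`, which completes `PendingNext` -> `Available` once the signer can produce the next point. `Point` and `Signer` are illustrative stand-ins (commitment points are plain `u64`s here rather than `PublicKey`s); only the transition logic mirrors the patch.

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Point {
        // `n` is the commitment transaction number; it counts backwards.
        Available { n: u64, current: u64, next: u64 },
        PendingNext { n: u64, current: u64 },
    }

    trait Signer {
        // Err(()) models a signer that cannot produce the point yet.
        fn per_commitment_point(&self, n: u64) -> Result<u64, ()>;
    }

    impl Point {
        // `PendingNext` -> `Available`, if the signer is now ready.
        fn try_resolve_pending<S: Signer>(&mut self, signer: &S) {
            if let Point::PendingNext { n, current } = *self {
                if let Ok(next) = signer.per_commitment_point(n - 1) {
                    *self = Point::Available { n, current, next };
                }
            }
        }

        // `Available` -> `PendingNext`, and on to `Available` when possible.
        fn advance<S: Signer>(&mut self, signer: &S) -> Result<(), ()> {
            if let Point::Available { n, next, .. } = *self {
                *self = Point::PendingNext { n: n - 1, current: next };
                self.try_resolve_pending(signer);
                return Ok(());
            }
            Err(())
        }
    }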
Returns `Ok` if we were at + /// `Available` and were able to advance our commitment number (even if we are still pending + /// the next commitment point). /// /// If our signer is not ready to provide the next commitment point, we will /// only advance to `PendingNext`, and should be tried again later in `signer_unblocked` @@ -1020,7 +1022,9 @@ impl HolderCommitmentPoint { /// This method is used for the following transitions: /// - `Available` -> `PendingNext` /// - `Available` -> `PendingNext` -> `Available` (in one fell swoop) - pub fn advance(&mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L) + pub fn advance( + &mut self, signer: &ChannelSignerType, secp_ctx: &Secp256k1, logger: &L + ) -> Result<(), ()> where SP::Target: SignerProvider, L::Target: Logger { if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self { @@ -1029,7 +1033,9 @@ impl HolderCommitmentPoint { current: *next, }; self.try_resolve_pending(signer, secp_ctx, logger); + return Ok(()); } + Err(()) } } @@ -4618,7 +4624,18 @@ impl Channel where channel_id: Some(self.context.channel_id()), }; - self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); + if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + // We only fail to advance our commitment point/number if we're currently + // waiting for our signer to unblock and provide a commitment point. + // During post-funding channel operation, we only advance our point upon + // receiving a commitment_signed, and our counterparty cannot send us + // another commitment signed until we've provided a new commitment point + // in revoke_and_ack, which requires unblocking our signer and completing + // the advance to the next point. This should be unreachable since + // a new commitment_signed should fail at our signature checks above. + debug_assert!(false, "We should be ready to advance our commitment point by the time we receive commitment_signed"); + return Err(ChannelError::close("Failed to advance our commitment point".to_owned())); + } self.context.expecting_peer_commitment_signed = false; // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call // build_commitment_no_status_check() next which will reset this to RAAFirst. @@ -7799,7 +7816,14 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } else { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); } - self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); + if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + // We only fail to advance our commitment point/number if we're currently + // waiting for our signer to unblock and provide a commitment point. + // We cannot send open_channel before this has occurred, so if we + // err here by the time we receive funding_signed, something has gone wrong. 
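Each `advance` call site in this patch follows the same shape. A hedged sketch of that pattern, with `ChannelError` reduced to a plain string wrapper and `require_advanced` an illustrative helper rather than an LDK function:

    #[derive(Debug)]
    struct ChannelError(String);

    fn require_advanced(advanced: Result<(), ()>) -> Result<(), ChannelError> {
        advanced.map_err(|()| {
            // Debug builds treat this as an unreachable invariant violation...
            debug_assert!(false, "signer should be ready to advance the commitment point here");
            // ...while release builds close the channel with an error.
            ChannelError("Failed to advance our commitment point".to_owned())
        })
    }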
+ debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_signed"); + return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned()))); + } self.context.cur_counterparty_commitment_transaction_number -= 1; log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); @@ -8066,7 +8090,14 @@ impl InboundV1Channel where SP::Target: SignerProvider { self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); self.context.cur_counterparty_commitment_transaction_number -= 1; - self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger); + if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + // We only fail to advance our commitment point/number if we're currently + // waiting for our signer to unblock and provide a commitment point. + // We cannot send accept_channel before this has occurred, so if we + // err here by the time we receive funding_created, something has gone wrong. + debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_created"); + return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned()))); + } let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger); From 1fa67d943463b20f989be91bf5c26d69c2a4e1de Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Thu, 30 May 2024 15:25:29 -0500 Subject: [PATCH 3/6] Add logger as parameter in creating ChannelContext --- lightning/src/ln/channel.rs | 62 ++++++++++++++++------------ lightning/src/ln/channelmanager.rs | 2 +- lightning/src/ln/functional_tests.rs | 5 ++- 3 files changed, 40 insertions(+), 29 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 3f7d01bb891..6d72bf0eb9a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1814,7 +1814,7 @@ impl ChannelContext where SP::Target: SignerProvider { Ok(channel_context) } - fn new_for_outbound_channel<'a, ES: Deref, F: Deref>( + fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>( fee_estimator: &'a LowerBoundedFeeEstimator, entropy_source: &'a ES, signer_provider: &'a SP, @@ -1831,11 +1831,13 @@ impl ChannelContext where SP::Target: SignerProvider { channel_keys_id: [u8; 32], holder_signer: ::EcdsaSigner, pubkeys: ChannelPublicKeys, + _logger: L, ) -> Result, APIError> where ES::Target: EntropySource, F::Target: FeeEstimator, SP::Target: SignerProvider, + L::Target: Logger, { // This will be updated with the counterparty contribution if this is a dual-funded channel let channel_value_satoshis = funding_satoshis; @@ -7525,13 +7527,14 @@ pub(super) struct OutboundV1Channel where SP::Target: SignerProvider } impl OutboundV1Channel where SP::Target: SignerProvider { - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64, temporary_channel_id: Option + outbound_scid_alias: u64, temporary_channel_id: Option, logger: L ) -> Result, APIError> where ES::Target: EntropySource, - F::Target: FeeEstimator + 
F::Target: FeeEstimator, + L::Target: Logger, { let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { @@ -7563,6 +7566,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { channel_keys_id, holder_signer, pubkeys, + logger, )?, unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 } }; @@ -8149,14 +8153,15 @@ pub(super) struct OutboundV2Channel where SP::Target: SignerProvider #[cfg(any(dual_funding, splicing))] impl OutboundV2Channel where SP::Target: SignerProvider { - pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64, - funding_confirmation_target: ConfirmationTarget, + funding_confirmation_target: ConfirmationTarget, logger: L, ) -> Result, APIError> where ES::Target: EntropySource, F::Target: FeeEstimator, + L::Target: Logger, { let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id); let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id); @@ -8188,6 +8193,7 @@ impl OutboundV2Channel where SP::Target: SignerProvider { channel_keys_id, holder_signer, pubkeys, + logger, )?, unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }, dual_funding_context: DualFundingChannelContext { @@ -9595,11 +9601,12 @@ mod tests { keys_provider.expect(OnGetShutdownScriptpubkey { returns: non_v0_segwit_shutdown_script.clone(), }); + let logger = test_utils::TestLogger::new(); let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) { + match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -9619,10 +9626,11 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + let logger = test_utils::TestLogger::new(); let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. 
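This patch threads the logger through the constructors with LDK's usual `L: Deref` bound. A minimal sketch of that pattern, assuming a stand-in `Logger` trait and a hypothetical `new_channel` constructor:

    use std::ops::Deref;

    trait Logger { fn log(&self, msg: &str); }

    struct StdoutLogger;
    impl Logger for StdoutLogger {
        fn log(&self, msg: &str) { println!("{msg}"); }
    }

    // Taking `logger: L` lets call sites pass `&logger`, exactly as the
    // updated tests and the `ChannelManager` call site below do.
    fn new_channel<L: Deref>(value_sat: u64, logger: L) -> Result<(), ()>
    where L::Target: Logger {
        logger.log(&format!("creating channel for {value_sat} sats"));
        Ok(())
    }

    fn main() {
        new_channel(10_000_000, &StdoutLogger).unwrap();
    }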
@@ -9649,7 +9657,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. @@ -9729,10 +9737,11 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + let logger = test_utils::TestLogger::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type()); let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type()); @@ -9781,7 +9790,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); @@ -9845,12 +9854,12 @@ mod tests { // Test that `OutboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. 
- let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap(); + let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap(); + let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -9870,14 +9879,14 @@ mod tests { // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap(); + let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
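The expectations in these in-flight tests reduce to a small clamp: the configured percentage is bounded to 1..=100 before being applied to the channel value. A sketch of that computation (the helper name and signature are illustrative):

    fn holder_max_htlc_value_in_flight_msat(channel_value_sat: u64, percent_cfg: u8) -> u64 {
        // Values below 1% are raised to 1%; values above 100% saturate at 100%.
        let pct = u64::from(percent_cfg.clamp(1, 100));
        channel_value_sat * 1000 * pct / 100
    }

    // Mirroring the assertions in these tests, for a 10_000_000 sat channel:
    //   2%  -> 200_000_000 msat
    //   0   -> clamped to 1%, 100_000_000 msat
    //   101 -> clamped to 100%, 10_000_000_000 msat (the full channel value)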
- let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap(); + let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); @@ -9930,7 +9939,7 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap(); + let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -9967,7 +9976,7 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. 
@@ -10044,7 +10053,7 @@ mod tests { let config = UserConfig::default(); let features = channelmanager::provided_init_features(&config); let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new( - &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None + &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger ).unwrap(); let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new( &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), @@ -10198,7 +10207,7 @@ mod tests { let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.announced_channel = false; - let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test + let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None, &*logger).unwrap(); // Nothing uses their network key in this test chan.context.holder_dust_limit_satoshis = 546; chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel @@ -10945,7 +10954,7 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, - node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap(); + node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); @@ -10980,7 +10989,7 @@ mod tests { let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, - &config, 0, 42, None + &config, 0, 42, None, &logger ).unwrap(); assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx()); @@ -10991,7 +11000,7 @@ mod tests { let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); @@ -11029,7 +11038,7 @@ mod tests { let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); // Set `channel_type` to `None` to force the implicit feature negotiation. 
@@ -11076,7 +11085,7 @@ mod tests { let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, - None + None, &logger ).unwrap(); let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); @@ -11095,7 +11104,7 @@ mod tests { // LDK. let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, - 10000000, 100000, 42, &config, 0, 42, None + 10000000, 100000, 42, &config, 0, 42, None, &logger ).unwrap(); let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); @@ -11145,7 +11154,8 @@ mod tests { &config, 0, 42, - None + None, + &logger ).unwrap(); let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 89f967c4f70..30e4df67578 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3074,7 +3074,7 @@ where let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, - self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id) + self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger) { Ok(res) => res, Err(e) => { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index a01388f2015..e56fbf76c5e 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -31,7 +31,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use crate::util::test_channel_signer::TestChannelSigner; -use crate::util::test_utils::{self, WatchtowerPersister}; +use crate::util::test_utils::{self, TestLogger, WatchtowerPersister}; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::string::UntrustedString; @@ -7244,11 +7244,12 @@ fn test_user_configurable_csv_delay() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let logger = TestLogger::new(); // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new() if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, - &low_our_to_self_config, 0, 42, None) + &low_our_to_self_config, 0, 42, None, &logger) { match error { APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, From 1f7f3a366c9e62cff5a5025724b5b508255a89d7 Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Wed, 5 Jun 2024 17:40:38 -0500 Subject: [PATCH 4/6] Change get_per_commitment_point to 
return result type Includes simple changes to test util signers and tests, as well as handling the error case for get_per_commitment_point in HolderCommitmentPoint. This leaves a couple `.expect`s in places that will be addressed in a separate PR for handling funding. --- lightning/src/ln/channel.rs | 47 +++++++++++++++-------- lightning/src/ln/functional_tests.rs | 8 ++-- lightning/src/sign/mod.rs | 19 +++++++-- lightning/src/util/test_channel_signer.rs | 4 +- 4 files changed, 53 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6d72bf0eb9a..9a824b2d6a6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -962,8 +962,12 @@ impl HolderCommitmentPoint { { HolderCommitmentPoint::Available { transaction_number: INITIAL_COMMITMENT_NUMBER, - current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx), - next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx), + // TODO(async_signing): remove this expect with the Uninitialized variant + current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx) + .expect("Signer must be able to provide initial commitment point"), + // TODO(async_signing): remove this expect with the Uninitialized variant + next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx) + .expect("Signer must be able to provide second commitment point"), } } @@ -1001,9 +1005,12 @@ impl HolderCommitmentPoint { where SP::Target: SignerProvider, L::Target: Logger { if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self { - let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx); - log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); - *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + if let Ok(next) = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx) { + log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1); + *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next }; + } else { + log_trace!(logger, "Next per-commitment point {} is pending", transaction_number); + } } } @@ -5622,7 +5629,9 @@ impl Channel where let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1; if msg.next_remote_commitment_number > 0 { - let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); + let expected_point = self.context.holder_signer.as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for per commitment points upon channel reestablishment"); let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) .map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { @@ -8230,10 +8239,12 @@ impl OutboundV2Channel where SP::Target: SignerProvider { let first_per_commitment_point = self.context.holder_signer.as_ref() 
.get_per_commitment_point(self.context.holder_commitment_point.transaction_number(), - &self.context.secp_ctx); + &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); let second_per_commitment_point = self.context.holder_signer.as_ref() .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1, - &self.context.secp_ctx); + &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); let keys = self.context.get_holder_pubkeys(); msgs::OpenChannelV2 { @@ -8380,9 +8391,11 @@ impl InboundV2Channel where SP::Target: SignerProvider { /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 { let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( - self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx); + self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point( - self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx); + self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx) + .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment"); let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannelV2 { @@ -9334,14 +9347,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (Some(current), Some(next)) => HolderCommitmentPoint::Available { transaction_number: cur_holder_commitment_transaction_number, current, next }, - (Some(current), _) => HolderCommitmentPoint::Available { + (Some(current), _) => HolderCommitmentPoint::PendingNext { transaction_number: cur_holder_commitment_transaction_number, current, - next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx), }, - (_, _) => HolderCommitmentPoint::Available { - transaction_number: cur_holder_commitment_transaction_number, - current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx), - next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx), + (_, _) => { + // TODO(async_signing): remove this expect with the Uninitialized variant + let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx) + .expect("Must be able to derive the current commitment point upon channel restoration"); + HolderCommitmentPoint::PendingNext { + transaction_number: cur_holder_commitment_transaction_number, current, + } }, }; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e56fbf76c5e..6a0343fb533 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -741,7 +741,7 @@ fn test_update_fee_that_funder_cannot_afford() { (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; - let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { + let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { let per_peer_state = 
nodes[1].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).map( @@ -750,7 +750,7 @@ fn test_update_fee_that_funder_cannot_afford() { let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(), pubkeys.funding_pubkey) }; @@ -1475,7 +1475,7 @@ fn test_fee_spike_violation_fails_htlc() { let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap(), chan_signer.as_ref().pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { @@ -1487,7 +1487,7 @@ fn test_fee_spike_violation_fails_htlc() { let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.as_ref().pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap(), chan_signer.as_ref().pubkeys().funding_pubkey) }; diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 885b8840b76..d684b4fe680 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -725,12 +725,23 @@ impl HTLCDescriptor { /// A trait to handle Lightning channel key material without concretizing the channel type or /// the signature mechanism. +/// +/// Several methods allow error types to be returned to support async signing. This feature +/// is not yet complete, and panics may occur in certain situations when returning errors +/// for these methods. pub trait ChannelSigner { /// Gets the per-commitment point for a specific commitment number /// /// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards. - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) - -> PublicKey; + /// + /// If the signer returns `Err`, then the user is responsible for either force-closing the channel + /// or calling `ChannelManager::signer_unblocked` (this method is only available when the + /// `async_signing` cfg flag is enabled) once the signature is ready. 
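A hedged sketch of what a signer backend might look like under this fallible contract: `RemoteSigner` and its point cache are invented for illustration (points are opaque `[u8; 33]` blobs rather than `secp256k1` keys); only the `Result` shape and the retry-via-`signer_unblocked` expectation come from the patch.

    use std::collections::HashMap;

    struct RemoteSigner {
        // Points delivered so far by an asynchronous signing service.
        ready_points: HashMap<u64, [u8; 33]>,
    }

    impl RemoteSigner {
        // Err(()) while the service has not answered for `idx` yet; the node
        // is then responsible for force-closing or retrying via
        // `ChannelManager::signer_unblocked`.
        fn get_per_commitment_point(&self, idx: u64) -> Result<[u8; 33], ()> {
            self.ready_points.get(&idx).copied().ok_or(())
        }

        // Invoked once the service delivers the point for `idx`.
        fn point_ready(&mut self, idx: u64, point: [u8; 33]) {
            self.ready_points.insert(idx, point);
        }
    }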
+ /// + // TODO: link to `signer_unblocked` once the cfg flag is removed + fn get_per_commitment_point( + &self, idx: u64, secp_ctx: &Secp256k1, + ) -> Result; /// Gets the commitment secret for a specific commitment number as part of the revocation process /// @@ -1343,11 +1354,11 @@ impl EntropySource for InMemorySigner { impl ChannelSigner for InMemorySigner { fn get_per_commitment_point( &self, idx: u64, secp_ctx: &Secp256k1, - ) -> PublicKey { + ) -> Result { let commitment_secret = SecretKey::from_slice(&chan_utils::build_commitment_secret(&self.commitment_seed, idx)) .unwrap(); - PublicKey::from_secret_key(secp_ctx, &commitment_secret) + Ok(PublicKey::from_secret_key(secp_ctx, &commitment_secret)) } fn release_commitment_secret(&self, idx: u64) -> [u8; 32] { diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index 28647982bda..aa491b04bd8 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -164,7 +164,9 @@ impl TestChannelSigner { } impl ChannelSigner for TestChannelSigner { - fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> PublicKey { + fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> Result { + // TODO: implement a mask in EnforcementState to let you test signatures being + // unavailable self.inner.get_per_commitment_point(idx, secp_ctx) } From 614da40f19ed7911903415f2820d82cbe294298b Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Sat, 29 Jun 2024 15:41:27 -0700 Subject: [PATCH 5/6] Allow failing revoke_and_ack if commitment point is not available --- lightning/src/ln/channel.rs | 70 +++++++++++++++++++++++++++---------- 1 file changed, 51 insertions(+), 19 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9a824b2d6a6..be9d2d3ea58 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5363,9 +5363,9 @@ impl Channel where } let mut raa = if self.context.monitor_pending_revoke_and_ack { - Some(self.get_last_revoke_and_ack()) + self.get_last_revoke_and_ack(logger) } else { None }; - let commitment_update = if self.context.monitor_pending_commitment_signed { + let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -5373,6 +5373,11 @@ impl Channel where self.context.signer_pending_revoke_and_ack = true; raa = None; } + if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst + && self.context.signer_pending_revoke_and_ack && commitment_update.is_some() { + self.context.signer_pending_commitment_update = true; + commitment_update = None; + } if commitment_update.is_some() { self.mark_awaiting_response(); @@ -5443,6 +5448,10 @@ impl Channel where /// blocked. 
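The ordering rule this patch enforces in both `monitor_updating_restored` and `signer_maybe_unblocked` can be stated as a small pure function: if the message that must be sent first is still blocked on the signer, withhold the other one too. A reduced sketch under that reading (LDK additionally sets the corresponding `signer_pending_*` flag when withholding, as noted in the comment):

    #[derive(Clone, Copy, PartialEq)]
    enum ResendOrder { CommitmentFirst, RevokeAndACKFirst }

    fn apply_resend_order<R, C>(
        order: ResendOrder, raa_blocked: bool, cs_blocked: bool,
        raa: Option<R>, cs: Option<C>,
    ) -> (Option<R>, Option<C>) {
        match order {
            // The withheld message is flagged as signer-pending so it is
            // regenerated on the next `signer_unblocked` call.
            ResendOrder::RevokeAndACKFirst if raa_blocked => (None, None),
            ResendOrder::CommitmentFirst if cs_blocked => (None, None),
            _ => (raa, cs),
        }
    }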
#[cfg(async_signing)] pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { + if !self.context.holder_commitment_point.is_available() { + log_trace!(logger, "Attempting to update holder per-commitment point..."); + self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); + } let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() { self.context.get_funding_signed_msg(logger).1 } else { None }; @@ -5456,7 +5465,7 @@ impl Channel where } else { None }; let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { log_trace!(logger, "Attempting to generate pending revoke and ack..."); - Some(self.get_last_revoke_and_ack()) + self.get_last_revoke_and_ack(logger) } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -5488,19 +5497,35 @@ impl Channel where } } - fn get_last_revoke_and_ack(&mut self) -> msgs::RevokeAndACK { - debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER + 2); - // TODO: handle non-available case when get_per_commitment_point becomes async - debug_assert!(self.context.holder_commitment_point.is_available()); - let next_per_commitment_point = self.context.holder_commitment_point.current_point(); + fn get_last_revoke_and_ack(&mut self, logger: &L) -> Option where L::Target: Logger { + debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2); + self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2); - self.context.signer_pending_revoke_and_ack = false; - msgs::RevokeAndACK { - channel_id: self.context.channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, + if let HolderCommitmentPoint::Available { transaction_number: _, current, next: _ } = self.context.holder_commitment_point { + self.context.signer_pending_revoke_and_ack = false; + Some(msgs::RevokeAndACK { + channel_id: self.context.channel_id, + per_commitment_secret, + next_per_commitment_point: current, + #[cfg(taproot)] + next_local_nonce: None, + }) + } else { + #[cfg(not(async_signing))] { + panic!("Holder commitment point must be Available when generating revoke_and_ack"); + } + #[cfg(async_signing)] { + // Technically if we're at HolderCommitmentPoint::PendingNext, + // we have a commitment point ready to send in an RAA, however we + // choose to wait since if we send RAA now, we could get another + // CS before we have any commitment point available. Blocking our + // RAA here is a convenient way to make sure that post-funding + // we're only ever waiting on one commitment point at a time. 
+ log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available", + &self.context.channel_id(), self.context.holder_commitment_point.transaction_number()); + self.context.signer_pending_revoke_and_ack = true; + None + } } } @@ -5708,7 +5733,7 @@ impl Channel where self.context.monitor_pending_revoke_and_ack = true; None } else { - Some(self.get_last_revoke_and_ack()) + self.get_last_revoke_and_ack(logger) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -5735,7 +5760,7 @@ impl Channel where } else { None }; if msg.next_local_commitment_number == next_counterparty_commitment_number { - if required_revoke.is_some() { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); @@ -5748,7 +5773,7 @@ impl Channel where order: self.context.resend_order.clone(), }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { - if required_revoke.is_some() { + if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack { log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); @@ -5762,7 +5787,14 @@ impl Channel where order: self.context.resend_order.clone(), }) } else { - let commitment_update = self.get_last_commitment_update_for_send(logger).ok(); + let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst + && self.context.signer_pending_revoke_and_ack { + log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id()); + self.context.signer_pending_commitment_update = true; + None + } else { + self.get_last_commitment_update_for_send(logger).ok() + }; let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst && self.context.signer_pending_commitment_update && required_revoke.is_some() { log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id()); From 45c0a0f10c6fe83f21727bae62af4976c630ec5d Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Sun, 30 Jun 2024 16:05:48 -0700 Subject: [PATCH 6/6] Test async get per commitment point for revoke_and_ack Note: this does not test the CS -> RAA resend ordering, because this requires handling async get_per_commitment_point for channel reestablishment, which will be addressed in a follow up PR. 
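The tests below drive all of this through `TestChannelSigner`'s per-operation toggles (`disable_channel_signer_op` / `enable_channel_signer_op`). Reduced to its essence, the mechanism is a set of disabled operations consulted before each signer call; `TogglableSigner` is a stand-in type, with `SignerOp` variants matching the ones the tests use:

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    enum SignerOp { GetPerCommitmentPoint, SignCounterpartyCommitment }

    #[derive(Default)]
    struct TogglableSigner { disabled: HashSet<SignerOp> }

    impl TogglableSigner {
        fn disable_op(&mut self, op: SignerOp) { self.disabled.insert(op); }
        fn enable_op(&mut self, op: SignerOp) { self.disabled.remove(&op); }

        fn get_per_commitment_point(&self, idx: u64) -> Result<u64, ()> {
            if self.disabled.contains(&SignerOp::GetPerCommitmentPoint) {
                return Err(()); // models an async signer that has not responded
            }
            Ok(idx) // stand-in for deriving the real point
        }
    }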
--- lightning/src/ln/async_signer_tests.rs | 175 +++++++++++++++++++--- lightning/src/util/test_channel_signer.rs | 6 +- 2 files changed, 157 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 533be389fd5..c90db503aea 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -127,6 +127,11 @@ fn test_async_commitment_signature_for_funding_signed() { #[test] fn test_async_commitment_signature_for_commitment_signed() { + do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(true); + do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(false); +} + +fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(enable_sign_counterparty_commit_first: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -154,23 +159,33 @@ fn test_async_commitment_signature_for_commitment_signed() { // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. + dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); - get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); - - // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. - dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); - - let events = dst.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1, "expected one message, got {}", events.len()); - if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] { - assert_eq!(node_id, &src.node.get_our_node_id()); + if enable_sign_counterparty_commit_first { + // Unblock CS -> no messages should be sent, since we must send RAA first. + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); + dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + let events = dst.node.get_and_clear_pending_msg_events(); + assert!(events.is_empty(), "expected no message, got {}", events.len()); + + // Unblock revoke_and_ack -> we should send both RAA + CS. + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + get_revoke_commit_msgs(&dst, &src.node.get_our_node_id()); } else { - panic!("expected UpdateHTLCs message, not {:?}", events[0]); - }; + // Unblock revoke_and_ack -> we should send just RAA. + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + + // Unblock commitment signed -> we should send CS. 
+ dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); + dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + get_htlc_update_msgs(dst, &src.node.get_our_node_id()); + } } #[test] @@ -272,9 +287,125 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { assert_eq!(nodes[1].node.list_usable_channels().len(), 1); } +#[derive(PartialEq)] +enum UnblockSignerAcrossDisconnectCase { + AtEnd, + BeforeMonitorRestored, + BeforeReestablish, +} + +#[test] +fn test_async_raa_peer_disconnect() { + do_test_async_raa_peer_disconnect(UnblockSignerAcrossDisconnectCase::AtEnd); + do_test_async_raa_peer_disconnect(UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored); + do_test_async_raa_peer_disconnect(UnblockSignerAcrossDisconnectCase::BeforeReestablish); +} + +fn do_test_async_raa_peer_disconnect(test_case: UnblockSignerAcrossDisconnectCase) { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Send a payment. + let src = &nodes[0]; + let dst = &nodes[1]; + let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000); + src.node.send_payment_with_route(&route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(src, 1); + + // Pass the payment along the route. + let payment_event = { + let mut events = src.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.msgs.len(), 1); + + dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]); + + if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { + // Fail to persist the monitor update when handling the commitment_signed. + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } + + // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a + // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. + dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); + check_added_monitors(dst, 1); + + let events = dst.node.get_and_clear_pending_msg_events(); + assert!(events.is_empty(), "expected no message, got {}", events.len()); + + // Now disconnect and reconnect the peers. 
+ src.node.peer_disconnected(&dst.node.get_our_node_id()); + dst.node.peer_disconnected(&src.node.get_our_node_id()); + + // do reestablish stuff + src.node.peer_connected(&dst.node.get_our_node_id(), &msgs::Init { + features: dst.node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(src, dst); + assert_eq!(reestablish_1.len(), 1); + dst.node.peer_connected(&src.node.get_our_node_id(), &msgs::Init { + features: src.node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(dst, src); + assert_eq!(reestablish_2.len(), 1); + + if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish { + // Reenable the signer before the reestablish. + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + } + + dst.node.handle_channel_reestablish(&src.node.get_our_node_id(), &reestablish_1[0]); + + if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); + let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + check_added_monitors!(dst, 0); + } + + // Expect the RAA + let (_, revoke_and_ack, commitment_signed, resend_order) = handle_chan_reestablish_msgs!(dst, src); + if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { + assert!(revoke_and_ack.is_none()); + assert!(commitment_signed.is_none()); + } else { + assert!(revoke_and_ack.is_some()); + assert!(commitment_signed.is_some()); + assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); + } + + // Mark dst's signer as available and retry: we now expect to see dst's RAA + CS. + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); + dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + + if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { + let (_, revoke_and_ack, commitment_signed, resend_order) = handle_chan_reestablish_msgs!(dst, src); + assert!(revoke_and_ack.is_some()); + assert!(commitment_signed.is_some()); + assert!(resend_order == RAACommitmentOrder::RevokeAndACKFirst); + } else { + // Make sure we don't double send the RAA. + let (_, revoke_and_ack, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); + assert!(revoke_and_ack.is_none()); + assert!(commitment_signed.is_none()); + } +} + + #[test] fn test_async_commitment_signature_peer_disconnect() { - do_test_async_commitment_signature_peer_disconnect(0); + // This tests that if our signer is blocked and gets unblocked + // after a peer disconnect + channel reestablish, we'll send the right messages. + do_test_async_commitment_signature_peer_disconnect(UnblockSignerAcrossDisconnectCase::AtEnd); } #[test] @@ -283,7 +414,7 @@ fn test_async_commitment_signature_peer_disconnect_signer_restored_before_monito // and needed to send a CS, that if our signer becomes available before the monitor // update completes, then we don't send duplicate messages upon calling `signer_unblocked` // after the monitor update completes. 
- do_test_async_commitment_signature_peer_disconnect(1); + do_test_async_commitment_signature_peer_disconnect(UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored); } #[test] @@ -291,10 +422,10 @@ fn test_async_commitment_signature_peer_disconnect_signer_restored_before_reesta // This tests that if we tried to send a commitment_signed, but our signer was blocked, // if we disconnect, reconnect, the signer becomes available, then handle channel_reestablish, // that we don't send duplicate messages upon calling `signer_unblocked`. - do_test_async_commitment_signature_peer_disconnect(2); + do_test_async_commitment_signature_peer_disconnect(UnblockSignerAcrossDisconnectCase::BeforeReestablish); } -fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { +fn do_test_async_commitment_signature_peer_disconnect(test_case: UnblockSignerAcrossDisconnectCase) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -320,7 +451,7 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]); - if test_case == 1 { + if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { // Fail to persist the monitor update when handling the commitment_signed. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } @@ -331,7 +462,7 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); - if test_case != 1 { + if test_case != UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); } @@ -351,14 +482,14 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { let reestablish_2 = get_chan_reestablish_msgs!(dst, src); assert_eq!(reestablish_2.len(), 1); - if test_case == 2 { + if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish { // Reenable the signer before the reestablish. 
dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); } dst.node.handle_channel_reestablish(&src.node.get_our_node_id(), &reestablish_1[0]); - if test_case == 1 { + if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); @@ -369,7 +500,7 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { // Expect the RAA let (_, revoke_and_ack, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); assert!(revoke_and_ack.is_some()); - if test_case == 0 { + if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { assert!(commitment_signed.is_none()); } else { assert!(commitment_signed.is_some()); @@ -379,7 +510,7 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) { dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); - if test_case == 0 { + if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); assert!(commitment_signed.is_some()); } else { diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index aa491b04bd8..13dfd6a8353 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -165,8 +165,10 @@ impl TestChannelSigner { impl ChannelSigner for TestChannelSigner { fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1) -> Result { - // TODO: implement a mask in EnforcementState to let you test signatures being - // unavailable + #[cfg(test)] + if !self.is_signer_available(SignerOp::GetPerCommitmentPoint) { + return Err(()); + } self.inner.get_per_commitment_point(idx, secp_ctx) }
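Putting the pieces together, an illustrative end-to-end run of the blocked-then-unblocked flow these tests exercise (all types simplified; the `current` point is omitted from the pending state for brevity):

    use std::cell::Cell;

    #[derive(Debug, PartialEq)]
    enum Point { Available { n: u64, next: u64 }, PendingNext { n: u64 } }

    fn main() {
        let blocked = Cell::new(true);
        let get_point = |n: u64| if blocked.get() { Err(()) } else { Ok(n) };

        // Available at commitment number 10 while the signer is blocked.
        let mut point = Point::Available { n: 10, next: 9 };

        // advance: we can move to PendingNext, but the point for n - 1 is
        // not ready, so no revoke_and_ack goes out yet.
        point = match point {
            Point::Available { n, .. } => Point::PendingNext { n: n - 1 },
            p => p,
        };
        assert_eq!(point, Point::PendingNext { n: 9 });

        // signer_unblocked: the retry completes PendingNext -> Available and
        // the held-back revoke_and_ack (and commitment_signed) can be sent.
        blocked.set(false);
        if let Point::PendingNext { n } = point {
            if let Ok(next) = get_point(n - 1) {
                point = Point::Available { n, next };
            }
        }
        assert_eq!(point, Point::Available { n: 9, next: 8 });
    }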