diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 009281958bf..9ce18e8e102 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -370,8 +370,8 @@ impl OnchainEventEntry { conf_threshold } - fn has_reached_confirmation_threshold(&self, height: u32) -> bool { - height >= self.confirmation_threshold() + fn has_reached_confirmation_threshold(&self, best_block: &BestBlock) -> bool { + best_block.height() >= self.confirmation_threshold() } } @@ -1331,7 +1331,7 @@ impl ChannelMonitorImpl { macro_rules! claim_htlcs { ($commitment_number: expr, $txid: expr) => { let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None); - self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger); + self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger); } } if let Some(txid) = self.current_counterparty_commitment_txid { @@ -1353,11 +1353,14 @@ impl ChannelMonitorImpl { // *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our // holder commitment transactions. if self.broadcasted_holder_revokable_script.is_some() { - let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, 0); - self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger); + // Assume that the broadcasted commitment transaction confirmed in the current best + // block. Even if not, it's a reasonable metric for the bump criteria on the HTLC + // transactions. + let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height()); + self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger); if let Some(ref tx) = self.prev_holder_signed_commitment_tx { - let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, 0); - self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger); + let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, self.best_block.height()); + self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger); } } } @@ -1724,7 +1727,7 @@ impl ChannelMonitorImpl { // Returns (1) `PackageTemplate`s that can be given to the OnChainTxHandler, so that the handler can // broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable // script so we can detect whether a holder transaction has been seen on-chain.
- fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx, height: u32) -> (Vec<PackageTemplate>, Option<(Script, PublicKey, PublicKey)>) { + fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx, conf_height: u32) -> (Vec<PackageTemplate>, Option<(Script, PublicKey, PublicKey)>) { let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len()); let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key); @@ -1743,7 +1746,7 @@ impl ChannelMonitorImpl { }; HolderHTLCOutput::build_accepted(payment_preimage, htlc.amount_msat) }; - let htlc_package = PackageTemplate::build_package(holder_tx.txid, transaction_output_index, PackageSolvingData::HolderHTLCOutput(htlc_output), height, false, height); + let htlc_package = PackageTemplate::build_package(holder_tx.txid, transaction_output_index, PackageSolvingData::HolderHTLCOutput(htlc_output), htlc.cltv_expiry, false, conf_height); claim_requests.push(htlc_package); } } @@ -1856,7 +1859,7 @@ impl ChannelMonitorImpl { } else if htlc.0.cltv_expiry > self.best_block.height() + 1 { // Don't broadcast HTLC-Timeout transactions immediately as they don't meet the // current locktime requirements on-chain. We will broadcast them in - // `block_confirmed` when `would_broadcast_at_height` returns true. + // `block_confirmed` when `should_broadcast_holder_commitment_txn` returns true. // Note that we add + 1 as transactions are broadcastable when they can be // confirmed in the next block. continue; } @@ -1926,13 +1929,13 @@ impl ChannelMonitorImpl { if height > self.best_block.height() { self.best_block = BestBlock::new(block_hash, height); - self.block_confirmed(height, vec![], vec![], vec![], broadcaster, fee_estimator, logger) - } else { + self.block_confirmed(height, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger) + } else if block_hash != self.best_block.block_hash() { self.best_block = BestBlock::new(block_hash, height); self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height); self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger); Vec::new() - } + } else { Vec::new() } } fn transactions_confirmed( @@ -2004,33 +2007,49 @@ impl ChannelMonitorImpl { self.is_paying_spendable_output(&tx, height, &logger); } - self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, broadcaster, fee_estimator, logger) + if height > self.best_block.height() { + self.best_block = BestBlock::new(block_hash, height); + } + + self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger) } + /// Update state for new block(s)/transaction(s) confirmed. Note that the caller must update + /// `self.best_block` before calling if a new best blockchain tip is available. More + /// concretely, `self.best_block` must never be at a lower height than `conf_height`, avoiding + /// complexity especially in `OnchainTxHandler::update_claims_view`. + /// + /// `conf_height` should be set to the height at which any new transaction(s)/block(s) were + /// confirmed, even if it is not the current best height.
fn block_confirmed<B: Deref, F: Deref, L: Deref>( &mut self, - height: u32, + conf_height: u32, txn_matched: Vec<&Transaction>, mut watch_outputs: Vec<TransactionOutputs>, mut claimable_outpoints: Vec<PackageTemplate>, - broadcaster: B, - fee_estimator: F, - logger: L, + broadcaster: &B, + fee_estimator: &F, + logger: &L, ) -> Vec<TransactionOutputs> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { - let should_broadcast = self.would_broadcast_at_height(height, &logger); + debug_assert!(self.best_block.height() >= conf_height); + + let should_broadcast = self.should_broadcast_holder_commitment_txn(logger); if should_broadcast { let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone()); - let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), height, false, height); + let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height()); claimable_outpoints.push(commitment_package); self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0)); let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); self.holder_tx_signed = true; - let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height); + // Because we're broadcasting a commitment transaction, we should construct the package + // assuming it gets confirmed in the next block. Sadly, we have code which considers + // "not yet confirmed" things as discardable, so we cannot do that here. + let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height()); let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx); if !new_outputs.is_empty() { watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs)); @@ -2043,7 +2062,7 @@ impl ChannelMonitorImpl { self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>(); let mut onchain_events_reaching_threshold_conf = Vec::new(); for entry in onchain_events_awaiting_threshold_conf { - if entry.has_reached_confirmation_threshold(height) { + if entry.has_reached_confirmation_threshold(&self.best_block) { onchain_events_reaching_threshold_conf.push(entry); } else { self.onchain_events_awaiting_threshold_conf.push(entry); } } @@ -2098,7 +2117,7 @@ impl ChannelMonitorImpl { } } - self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, height, &&*broadcaster, &&*fee_estimator, &&*logger); + self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger); // Determine new outputs to watch by comparing against previously known outputs to watch, // updating the latter in the process.
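A minimal standalone sketch of the ordering contract the new `block_confirmed` documentation describes (hypothetical `Monitor` type and field names, not the LDK API): the caller advances the best block first, so confirmations may then be processed for blocks at or below the tip, and timelock/fee-bump decisions can always rely on the tip while claims keep their actual confirmation height.

    // Hypothetical model: `best_height` plays the role of `self.best_block`.
    struct Monitor { best_height: u32 }

    impl Monitor {
        fn transactions_confirmed(&mut self, conf_height: u32) {
            // Advance the tip first if the confirmation is in a new best block,
            // mirroring the change to `transactions_confirmed` above.
            if conf_height > self.best_height {
                self.best_height = conf_height;
            }
            self.block_confirmed(conf_height);
        }

        fn block_confirmed(&mut self, conf_height: u32) {
            // Mirrors the new debug_assert: the tip never lags conf_height.
            assert!(self.best_height >= conf_height);
            // ...claims would be recorded at conf_height, while broadcast and
            // bump decisions would use self.best_height...
        }
    }

    fn main() {
        let mut mon = Monitor { best_height: 110 };
        // A transaction may be reported as confirmed in an older block while
        // the tip stays at 110; the invariant still holds.
        mon.transactions_confirmed(105);
    }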
@@ -2200,7 +2219,7 @@ impl ChannelMonitorImpl { false } - fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger { + fn should_broadcast_holder_commitment_txn<L: Deref>(&self, logger: &L) -> bool where L::Target: Logger { // We need to consider all HTLCs which are: // * in any unrevoked counterparty commitment transaction, as they could broadcast said // transactions and we'd end up in a race, or @@ -2211,6 +2230,7 @@ impl ChannelMonitorImpl { // to the source, and if we don't fail the channel we will have to ensure that the next // updates that peer sends us are update_fails, failing the channel if not. It's probably // easier to just fail the channel as this case should be rare enough anyway. + let height = self.best_block.height(); macro_rules! scan_commitment { ($htlcs: expr, $holder_tx: expr) => { for ref htlc in $htlcs { diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 23b05afe687..cd283146510 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -343,7 +343,7 @@ impl OnchainTxHandler { /// (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pay-For-Parent. /// Panics if there are signing errors, because signing operations in reaction to on-chain events /// are not expected to fail, and if they do, we may lose funds. - fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)> + fn generate_claim_tx<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)> where F::Target: FeeEstimator, L::Target: Logger, { @@ -351,7 +351,7 @@ impl OnchainTxHandler { // Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we // didn't receive confirmation of it before, or not enough reorg-safe depth on top of it). - let new_timer = Some(cached_request.get_height_timer(height)); + let new_timer = Some(cached_request.get_height_timer(cur_height)); if cached_request.is_malleable() { let predicted_weight = cached_request.package_weight(&self.destination_script); if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, fee_estimator, logger) { @@ -377,12 +377,15 @@ impl OnchainTxHandler { /// for this channel, provide new relevant on-chain transactions and/or new claim requests. /// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output /// if we receive a preimage after force-close. - pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, height: u32, broadcaster: &B, fee_estimator: &F, logger: &L) + /// `conf_height` represents the height at which the transactions in `txn_matched` were + /// confirmed. This does not need to equal the current blockchain tip height, which should be + /// provided via `cur_height`; however, it must never be higher than `cur_height`.
+ pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32, broadcaster: &B, fee_estimator: &F, logger: &L) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, { - log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len()); + log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {} and {} claim requests", cur_height, txn_matched.len(), conf_height, requests.len()); let mut preprocessed_requests = Vec::with_capacity(requests.len()); let mut aggregated_request = None; @@ -401,8 +404,8 @@ impl OnchainTxHandler { continue; } - if req.package_timelock() > height + 1 { - log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height); + if req.package_timelock() > cur_height + 1 { + log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height); for outpoint in req.outpoints() { log_info!(logger, " Outpoint {}", outpoint); } + continue; } - log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER); - if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() { + log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), cur_height + CLTV_SHARED_CLAIM_BUFFER); + if req.timelock() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() { // Don't aggregate if outpoint package timelock is soon or marked as non-aggregable preprocessed_requests.push(req); } else if aggregated_request.is_none() { @@ -425,8 +428,8 @@ impl OnchainTxHandler { preprocessed_requests.push(req); } - // Claim everything up to and including height + 1 - let remaining_locked_packages = self.locktimed_packages.split_off(&(height + 2)); + // Claim everything up to and including cur_height + 1 + let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 2)); for (pop_height, mut entry) in self.locktimed_packages.iter_mut() { log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height); preprocessed_requests.append(&mut entry); } @@ -436,13 +439,13 @@ impl OnchainTxHandler { // Generate claim transactions and track them to bump if necessary at // height timer expiration (i.e. in how many blocks we're going to take action).
for mut req in preprocessed_requests { - if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) { + if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(cur_height, &req, &*fee_estimator, &*logger) { req.set_timer(new_timer); req.set_feerate(new_feerate); let txid = tx.txid(); for k in req.outpoints() { log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout); - self.claimable_outpoints.insert(k.clone(), (txid, height)); + self.claimable_outpoints.insert(k.clone(), (txid, conf_height)); } self.pending_claim_requests.insert(txid, req); log_info!(logger, "Broadcasting onchain {}", log_tx!(tx)); @@ -476,7 +479,7 @@ impl OnchainTxHandler { () => { let entry = OnchainEventEntry { txid: tx.txid(), - height, + height: conf_height, event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() } }; if !self.onchain_events_awaiting_threshold_conf.contains(&entry) { @@ -516,7 +519,7 @@ impl OnchainTxHandler { for package in claimed_outputs_material.drain(..) { let entry = OnchainEventEntry { txid: tx.txid(), - height, + height: conf_height, event: OnchainEvent::ContentiousOutpoint { package }, }; if !self.onchain_events_awaiting_threshold_conf.contains(&entry) { @@ -529,7 +532,7 @@ impl OnchainTxHandler { let onchain_events_awaiting_threshold_conf = self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>(); for entry in onchain_events_awaiting_threshold_conf { - if entry.has_reached_confirmation_threshold(height) { + if entry.has_reached_confirmation_threshold(cur_height) { match entry.event { OnchainEvent::Claim { claim_request } => { // We may remove a whole set of claim outpoints here, as these one may have @@ -555,7 +558,7 @@ impl OnchainTxHandler { // Check if any pending claim request must be rescheduled for (first_claim_txid, ref request) in self.pending_claim_requests.iter() { if let Some(h) = request.timer() { - if height >= h { + if cur_height >= h { bump_candidates.insert(*first_claim_txid, (*request).clone()); } } } @@ -564,7 +567,7 @@ impl OnchainTxHandler { // Build, bump and rebroadcast tx accordingly log_trace!(logger, "Bumping {} candidates", bump_candidates.len()); for (first_claim_txid, request) in bump_candidates.iter() { - if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) { + if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(cur_height, &request, &*fee_estimator, &*logger) { log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx)); broadcaster.broadcast_transaction(&bump_tx); if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5f5bc51dbb1..403744c4f6e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -114,8 +114,8 @@ enum InboundHTLCState { /// commitment transaction without it as otherwise we'll have to force-close the channel to /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim /// anyway).
That said, ChannelMonitor does this for us (see - /// ChannelMonitor::would_broadcast_at_height) so we actually remove the HTLC from our own - /// local state before then, once we're sure that the next commitment_signed and + /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from + /// our own local state before then, once we're sure that the next commitment_signed and /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC. LocalRemoved(InboundHTLCRemovalReason), } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f72c76e7ac3..766b2394b7a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -626,7 +626,7 @@ pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3; const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS; // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See -// ChannelMontior::would_broadcast_at_height for a description of why this is needed. +// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed. #[deny(const_err)] #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 166e943e66d..00bde294cef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -221,6 +221,9 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { pub fn best_block_info(&self) -> (BlockHash, u32) { self.blocks.lock().unwrap().last().map(|(a, b)| (a.block_hash(), *b)).unwrap() } + pub fn get_block_header(&self, height: u32) -> BlockHeader { + self.blocks.lock().unwrap()[height as usize].0 + } } impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { @@ -997,6 +1000,30 @@ macro_rules! expect_payment_sent { } } +#[cfg(test)] +macro_rules! expect_payment_failure_chan_update { + ($node: expr, $scid: expr, $chan_closed: expr) => { + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => { + match update { + &HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } if !$chan_closed => { + assert_eq!(msg.contents.short_channel_id, $scid); + assert_eq!(msg.contents.flags & 2, 0); + }, + &HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } if $chan_closed => { + assert_eq!(short_channel_id, $scid); + assert!(is_permanent); + }, + _ => panic!("Unexpected update type"), + } + }, + _ => panic!("Unexpected event"), + } + } +} + #[cfg(test)] macro_rules! expect_payment_failed { ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8eae9cc2dff..df3c64ae8ff 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -12,8 +12,7 @@ //! claim outputs on-chain. 
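The test updates below repeatedly connect `ANTI_REORG_DELAY - 1` extra blocks on top of a confirmation before expecting events. A small sketch of the arithmetic behind `has_reached_confirmation_threshold`, assuming `ANTI_REORG_DELAY = 6` as in this codebase and showing only the base threshold (the real `confirmation_threshold` can further delay certain maturing outputs); the free functions here are illustrative, not LDK API:

    const ANTI_REORG_DELAY: u32 = 6;

    // An event recorded at conf_height fires once the tip reaches
    // conf_height + ANTI_REORG_DELAY - 1, i.e. once the transaction has
    // ANTI_REORG_DELAY confirmations (its own block plus five more).
    fn confirmation_threshold(conf_height: u32) -> u32 {
        conf_height + ANTI_REORG_DELAY - 1
    }

    fn has_reached_confirmation_threshold(best_height: u32, conf_height: u32) -> bool {
        best_height >= confirmation_threshold(conf_height)
    }

    fn main() {
        let conf_height = 100;
        // One block short of the threshold: nothing should fire yet.
        assert!(!has_reached_confirmation_threshold(104, conf_height));
        // After connect_blocks(node, ANTI_REORG_DELAY - 1) from height 100,
        // the tip is at 105 and the threshold is met.
        assert!(has_reached_confirmation_threshold(105, conf_height));
    }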
use chain; -use chain::Listen; -use chain::Watch; +use chain::{Confirm, Listen, Watch}; use chain::channelmonitor; use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use chain::transaction::OutPoint; @@ -1360,15 +1359,7 @@ fn holding_cell_htlc_counting() { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => { - assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id); - }, - _ => panic!("Unexpected event"), - } - + expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false); expect_payment_failed!(nodes[0], payment_hash_2, false); // Now forward all the pending HTLCs and claim them back @@ -3009,10 +3000,16 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); { - // B will rebroadcast its own holder commitment transaction here...just because + // B may rebroadcast its own holder commitment transaction here, as a safeguard against + // some incredibly unlikely partial-eclipse-attack scenarios. That said, because the + // original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY, B really + // shouldn't broadcast anything here, and in some connect style scenarios we do not. let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 1); - check_spends!(node_txn[0], chan_2.3); + if node_txn.len() == 1 { + check_spends!(node_txn[0], chan_2.3); + } else { + assert_eq!(node_txn.len(), 0); + } } expect_pending_htlcs_forwardable!(nodes[1]); @@ -3095,13 +3092,7 @@ fn test_simple_commitment_revoked_fail_backward() { nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { ..
} => {}, - _ => panic!("Unexpected event"), - } + expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true); expect_payment_failed!(nodes[0], payment_hash, false); }, _ => panic!("Unexpected event"), @@ -4204,7 +4195,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); // Make sure all nodes are at the same starting height connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); @@ -4260,14 +4251,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { _ => unreachable!(), } expect_payment_failed!(nodes[0], second_payment_hash, false); - if let &MessageSendEvent::PaymentFailureNetworkUpdate { ref update } = &nodes[0].node.get_and_clear_pending_msg_events()[0] { - match update { - &HTLCFailChannelUpdate::ChannelUpdateMessage { .. } => {}, - _ => panic!("Unexpected event"), - } - } else { - panic!("Unexpected event"); - } + expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false); } else { expect_payment_failed!(nodes[1], second_payment_hash, true); } @@ -4847,7 +4831,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { } macro_rules! check_spendable_outputs { - ($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => { + ($node: expr, $keysinterface: expr) => { { let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); let mut txn = Vec::new(); @@ -4894,7 +4878,7 @@ fn test_claim_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); assert_eq!(spend_txn[0].input.len(), 1); check_spends!(spend_txn[0], node_txn[0]); @@ -4925,7 +4909,7 @@ fn test_claim_on_remote_sizeable_push_msat() { check_added_monitors!(nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); } @@ -4955,7 +4939,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx check_spends!(spend_txn[1], node_txn[0]); @@ -5004,7 +4988,7 @@ fn test_static_spendable_outputs_preimage_tx() { mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = 
check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); } @@ -5049,7 +5033,7 @@ fn test_static_spendable_outputs_timeout_tx() { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, true); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output check_spends!(spend_txn[0], commitment_tx[0]); check_spends!(spend_txn[1], node_txn[1]); @@ -5085,7 +5069,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); } @@ -5152,7 +5136,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); // Check B's ChannelMonitor was able to generate the right spendable output descriptor - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); assert_eq!(spend_txn[0].input.len(), 1); check_spends!(spend_txn[0], node_txn[1]); @@ -5227,7 +5211,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // didn't try to generate any new transactions. // Check A's ChannelMonitor was able to generate the right spendable output descriptor - let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 3); assert_eq!(spend_txn[0].input.len(), 1); check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx @@ -5452,13 +5436,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); { commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelClosed { .. 
} } => { - }, - _ => { panic!("Unexpected event"); } - } + expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true); } expect_payment_failed!(nodes[0], duplicate_payment_hash, false); @@ -5529,7 +5507,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1); // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); assert_eq!(spend_txn[0].input.len(), 1); check_spends!(spend_txn[0], node_tx); @@ -5827,7 +5805,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { expect_payment_failed!(nodes[0], our_payment_hash, true); // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], local_txn[0]); assert_eq!(spend_txn[1].input.len(), 1); @@ -5908,7 +5886,7 @@ fn test_key_derivation_params() { // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); - let spend_txn = check_spendable_outputs!(nodes[0], 1, new_keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager); assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], local_txn_1[0]); assert_eq!(spend_txn[1].input.len(), 1); @@ -5935,14 +5913,14 @@ fn test_static_output_closing_tx() { mine_transaction(&nodes[0], &closing_tx); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[0], 2, node_cfgs[0].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[1], 2, node_cfgs[1].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], closing_tx); } @@ -6517,20 +6495,8 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { _ => panic!("Unexpected event"), }; nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa); - let fail_msg_event = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(fail_msg_event.len(), 1); - match &fail_msg_event[0] { - &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {}, - _ => panic!("Unexpected event"), - } - let failure_event = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(failure_event.len(), 1); - match &failure_event[0] { - &Event::PaymentFailed { rejected_by_dest, .. 
} => { - assert!(!rejected_by_dest); - }, - _ => panic!("Unexpected event"), - } + expect_payment_failure_chan_update!(nodes[0], chan_1_2.0.contents.short_channel_id, false); + expect_payment_failed!(nodes[0], our_payment_hash, false); check_added_monitors!(nodes[0], 1); } @@ -7786,7 +7752,7 @@ fn test_data_loss_protect() { assert_eq!(node_txn[0].output.len(), 2); mine_transaction(&nodes[0], &node_txn[0]); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); - let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 1000000); + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); } @@ -9318,3 +9284,86 @@ fn test_invalid_funding_tx() { } else { panic!(); } assert_eq!(nodes[1].node.list_channels().len(), 0); } + +fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) { + // In the first version of the chain::Confirm interface, after a refactor was made to not + // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast + // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via + // `best_block_updated`, is at height N, and a transaction output which we wish to spend at + // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the + // spending transaction until height N+1 (or greater). This was due to the way + // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a + // spending transaction at the height the input transaction was confirmed at, not whether we + // should broadcast a spending transaction at the current height. + // A second, similar, issue involved failing HTLCs backwards - because we only provided the + // height at which transactions were confirmed to `OnchainTxHandler::update_claims_view`, it wasn't + // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards + // until we learned about an additional block. + // + // As an additional check, if `test_height_before_timelock` is set, we instead test that we + // aren't broadcasting transactions too early (i.e. not broadcasting them at all).
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false); + nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + + nodes[1].node.force_close_channel(&channel_id).unwrap(); + check_closed_broadcast!(nodes[1], true); + check_added_monitors!(nodes[1], 1); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(node_txn.len(), 1); + + let conf_height = nodes[1].best_block_info().1; + if !test_height_before_timelock { + connect_blocks(&nodes[1], 24 * 6); + } + nodes[1].chain_monitor.chain_monitor.transactions_confirmed( + &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); + if test_height_before_timelock { + // If we confirmed the close transaction, but timelocks have not yet expired, we should not + // generate any events or broadcast any transactions + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + } else { + // We should broadcast an HTLC transaction spending our funding transaction first + let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(spending_txn.len(), 2); + assert_eq!(spending_txn[0], node_txn[0]); + check_spends!(spending_txn[1], node_txn[0]); + // We should also generate a SpendableOutputs event with the to_self output (as its + // timelock is up). + let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); + assert_eq!(descriptor_spend_txn.len(), 1); + + // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we + // should immediately fail-backwards the HTLC to the previous hop, without waiting for an + // additional block built on top of the current chain. 
+ nodes[1].chain_monitor.chain_monitor.transactions_confirmed( + &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + expect_payment_failed!(nodes[0], payment_hash, false); + expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true); + } +} +#[test] +fn test_tx_confirmed_skipping_blocks_immediate_broadcast() { + do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false); + do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true); +}
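As a closing illustration of the `locktimed_packages` bookkeeping in `update_claims_view` above, a standalone sketch of the `BTreeMap::split_off` logic (the string package names are placeholders): a transaction with locktime H can confirm in block H, so it is broadcastable once the tip is at H - 1, which is why splitting at `cur_height + 2` releases exactly the packages with timelock <= cur_height + 1.

    use std::collections::BTreeMap;

    fn release_claimable(
        locktimed: &mut BTreeMap<u32, Vec<&'static str>>, cur_height: u32,
    ) -> Vec<&'static str> {
        // Keys >= cur_height + 2 stay locked; everything else can confirm in
        // the next block and is handed back for claiming.
        let still_locked = locktimed.split_off(&(cur_height + 2));
        let ready: Vec<&'static str> = locktimed.values().flatten().copied().collect();
        *locktimed = still_locked;
        ready
    }

    fn main() {
        let mut locktimed = BTreeMap::new();
        locktimed.insert(100, vec!["htlc-timeout-a"]);
        locktimed.insert(105, vec!["htlc-timeout-b"]);
        // With the tip at 99, a locktime-100 package can confirm in block 100.
        assert_eq!(release_claimable(&mut locktimed, 99), vec!["htlc-timeout-a"]);
        assert_eq!(locktimed.len(), 1); // the locktime-105 package stays locked
    }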