
Commit 3b58e24

Create SpendableOutputs events no matter the chain::Confirm order
We had a user point out that we weren't creating `SpendableOutputs` events when we should have been: they called `ChannelMonitor::best_block_updated` with a block well past a CSV locktime and then called `ChannelMonitor::transactions_confirmed` with the transaction which we should have been spending (at a block height/hash well in the past).

This was due to `ChannelMonitor::transactions_confirmed` only calling `ChannelMonitor::block_confirmed` with the height at which the transactions were confirmed, so all checks were done against that height rather than the current height. Further, in the same scenario, we also would not fail back an HTLC whose HTLC-Timeout transaction had confirmed more than ANTI_REORG_DELAY blocks ago.

To address this, we use the best block height for confirmation-threshold checks in `ChannelMonitor::block_confirmed` and pass both the confirmation and current heights through to `OnchainTxHandler::update_claims_view`, using each as appropriate.

Fixes lightningdevkit#962.
1 parent c970b8d commit 3b58e24
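
For illustration of the commit message above, here is a minimal sketch of the out-of-order `chain::Confirm` call sequence that previously failed to produce `SpendableOutputs` events. The helper function, variable names and heights are hypothetical (not part of the commit); the trait methods are the ones the new test exercises.

	use bitcoin::blockdata::block::BlockHeader;
	use bitcoin::blockdata::transaction::Transaction;
	use lightning::chain::Confirm;

	// Replay a confirmation that happened well below the current tip, after the listener
	// already knows about the tip. Before this commit, confirmation-threshold checks ran
	// against `old_height`, so no SpendableOutputs event (and no HTLC fail-back) fired
	// until yet another block was connected on top.
	fn replay_out_of_order<C: Confirm>(
		listener: &C,
		tip_header: &BlockHeader, tip_height: u32,   // current best block, e.g. height N
		old_header: &BlockHeader, old_height: u32,   // block far in the past, beyond the CSV
		spendable_tx: &Transaction,                  // transaction we should already be spending
	) {
		// 1) Learn about the chain tip first...
		listener.best_block_updated(tip_header, tip_height);
		// 2) ...then about the old confirmation. With this commit, checks use the best
		// block height, so the spend and the SpendableOutputs event happen immediately.
		listener.transactions_confirmed(old_header, &[(0, spendable_tx)], old_height);
	}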

File tree (4 files changed, +125 -33 lines):
  lightning/src/chain/channelmonitor.rs
  lightning/src/chain/onchaintx.rs
  lightning/src/ln/functional_test_utils.rs
  lightning/src/ln/functional_tests.rs

lightning/src/chain/channelmonitor.rs

Lines changed: 19 additions & 14 deletions
@@ -1335,7 +1335,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		macro_rules! claim_htlcs {
 			($commitment_number: expr, $txid: expr) => {
 				let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
-				self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger);
+				self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 			}
 		}
 		if let Some(txid) = self.current_counterparty_commitment_txid {
@@ -1358,10 +1358,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		// holder commitment transactions.
 		if self.broadcasted_holder_revokable_script.is_some() {
 			let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, 0);
-			self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger);
+			self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 			if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
 				let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx, 0);
-				self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), broadcaster, fee_estimator, logger);
+				self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
 			}
 		}
 	}
@@ -1930,7 +1930,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {

 		if height > self.best_block.height() {
 			self.best_block = BestBlock::new(block_hash, height);
-			self.block_confirmed(height, vec![], vec![], vec![], broadcaster, fee_estimator, logger)
+			self.block_confirmed(height, vec![], vec![], vec![], &broadcaster, &fee_estimator, &logger)
 		} else if self.best_block.block_hash() != block_hash {
 			self.best_block = BestBlock::new(block_hash, height);
 			self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
@@ -2012,33 +2012,37 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			self.best_block = BestBlock::new(block_hash, height);
 		}

-		self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, broadcaster, fee_estimator, logger)
+		self.block_confirmed(height, txn_matched, watch_outputs, claimable_outpoints, &broadcaster, &fee_estimator, &logger)
 	}

+	/// Update state for new block(s)/transaction(s) confirmed. Note that the caller *must* update
+	/// self.best_block before calling, and set `conf_height` to the height at which any new
+	/// transaction(s)/block(s) were confirmed at, even if it is not the current best height
+	/// (assuming we are being called with new transaction(s) confirmed).
 	fn block_confirmed<B: Deref, F: Deref, L: Deref>(
 		&mut self,
-		height: u32,
+		conf_height: u32,
 		txn_matched: Vec<&Transaction>,
 		mut watch_outputs: Vec<TransactionOutputs>,
 		mut claimable_outpoints: Vec<PackageTemplate>,
-		broadcaster: B,
-		fee_estimator: F,
-		logger: L,
+		broadcaster: &B,
+		fee_estimator: &F,
+		logger: &L,
 	) -> Vec<TransactionOutputs>
 	where
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		let should_broadcast = self.would_broadcast_at_height(height, &logger);
+		let should_broadcast = self.would_broadcast_at_height(self.best_block.height(), logger);
 		if should_broadcast {
 			let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone());
-			let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), height, false, height);
+			let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
 			claimable_outpoints.push(commitment_package);
 			self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
 			let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
 			self.holder_tx_signed = true;
-			let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
+			let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
 			let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
 			if !new_outputs.is_empty() {
 				watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
@@ -2051,7 +2055,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
 		let mut onchain_events_reaching_threshold_conf = Vec::new();
 		for entry in onchain_events_awaiting_threshold_conf {
-			if entry.has_reached_confirmation_threshold(height) {
+			if entry.has_reached_confirmation_threshold(self.best_block.height()) {
 				onchain_events_reaching_threshold_conf.push(entry);
 			} else {
 				self.onchain_events_awaiting_threshold_conf.push(entry);
@@ -2106,7 +2110,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			}
 		}

-		self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, height, &&*broadcaster, &&*fee_estimator, &&*logger);
+		self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, conf_height,
+			cmp::max(conf_height, self.best_block.height()), broadcaster, fee_estimator, logger);

 		// Determine new outputs to watch by comparing against previously known outputs to watch,
 		// updating the latter in the process.
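
To make the two heights concrete, here is a small standalone sketch (not code from the diff; names are illustrative) of the height pair `block_confirmed` now hands to `update_claims_view`: the confirmation height may lag well behind the tip, and timelock/ANTI_REORG_DELAY checks run against the larger of the two, mirroring the `cmp::max` call above.

	use std::cmp;

	/// Returns (conf_height, cur_height) in the order update_claims_view expects them.
	fn heights_for_claims_view(conf_height: u32, best_block_height: u32) -> (u32, u32) {
		// `conf_height` is where the newly-reported transactions confirmed; it can be far in
		// the past when transactions_confirmed replays history. Threshold checks should use
		// the best-known height, never something older than the confirmation itself.
		(conf_height, cmp::max(conf_height, best_block_height))
	}

	// e.g. a transaction confirmed at height 700 while the tip is at 800 yields (700, 800),
	// so a CSV or ANTI_REORG_DELAY that expired by height 800 is acted on immediately.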

lightning/src/chain/onchaintx.rs

Lines changed: 20 additions & 17 deletions
@@ -343,15 +343,15 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 	/// (CSV or CLTV following cases). In case of high-fee spikes, claim tx may stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pay-For-Parent.
 	/// Panics if there are signing errors, because signing operations in reaction to on-chain events
 	/// are not expected to fail, and if they do, we may lose funds.
-	fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
+	fn generate_claim_tx<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u64, Transaction)>
 		where F::Target: FeeEstimator,
 		      L::Target: Logger,
 	{
 		if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs

 		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
 		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
-		let new_timer = Some(cached_request.get_height_timer(height));
+		let new_timer = Some(cached_request.get_height_timer(cur_height));
 		if cached_request.is_malleable() {
 			let predicted_weight = cached_request.package_weight(&self.destination_script);
 			if let Some((output_value, new_feerate)) = cached_request.compute_package_output(predicted_weight, fee_estimator, logger) {
@@ -377,12 +377,15 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 	/// for this channel, provide new relevant on-chain transactions and/or new claim requests.
 	/// Formerly this was named `block_connected`, but it is now also used for claiming an HTLC output
 	/// if we receive a preimage after force-close.
-	pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, height: u32, broadcaster: &B, fee_estimator: &F, logger: &L)
+	/// `conf_height` represents the height at which the transactions in `txn_matched` were
+	/// confirmed, this does not need to equal the current blockchain tip height, which should be
+	/// provided via `cur_height`.
+	pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], requests: Vec<PackageTemplate>, conf_height: u32, cur_height: u32, broadcaster: &B, fee_estimator: &F, logger: &L)
 		where B::Target: BroadcasterInterface,
 		      F::Target: FeeEstimator,
 		      L::Target: Logger,
 	{
-		log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
+		log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {} and {} claim requests", cur_height, txn_matched.len(), conf_height, requests.len());
 		let mut preprocessed_requests = Vec::with_capacity(requests.len());
 		let mut aggregated_request = None;

@@ -401,17 +404,17 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 				continue;
 			}

-			if req.package_timelock() > height + 1 {
-				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
+			if req.package_timelock() > cur_height + 1 {
+				log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), cur_height);
 				for outpoint in req.outpoints() {
 					log_info!(logger, " Outpoint {}", outpoint);
 				}
 				self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
 				continue;
 			}

-			log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
-			if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
+			log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), cur_height + CLTV_SHARED_CLAIM_BUFFER);
+			if req.timelock() <= cur_height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
 				// Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
 				preprocessed_requests.push(req);
 			} else if aggregated_request.is_none() {
@@ -425,8 +428,8 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 			preprocessed_requests.push(req);
 		}

-		// Claim everything up to and including height + 1
-		let remaining_locked_packages = self.locktimed_packages.split_off(&(height + 2));
+		// Claim everything up to and including cur_height + 1
+		let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 2));
 		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
 			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
 			preprocessed_requests.append(&mut entry);
@@ -436,13 +439,13 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 		// Generate claim transactions and track them to bump if necessary at
 		// height timer expiration (i.e in how many blocks we're going to take action).
 		for mut req in preprocessed_requests {
-			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(height, &req, &*fee_estimator, &*logger) {
+			if let Some((new_timer, new_feerate, tx)) = self.generate_claim_tx(cur_height, &req, &*fee_estimator, &*logger) {
 				req.set_timer(new_timer);
 				req.set_feerate(new_feerate);
 				let txid = tx.txid();
 				for k in req.outpoints() {
 					log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
-					self.claimable_outpoints.insert(k.clone(), (txid, height));
+					self.claimable_outpoints.insert(k.clone(), (txid, conf_height));
 				}
 				self.pending_claim_requests.insert(txid, req);
 				log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
@@ -476,7 +479,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 					() => {
 						let entry = OnchainEventEntry {
 							txid: tx.txid(),
-							height,
+							height: conf_height,
 							event: OnchainEvent::Claim { claim_request: first_claim_txid_height.0.clone() }
 						};
 						if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
@@ -516,7 +519,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 			for package in claimed_outputs_material.drain(..) {
 				let entry = OnchainEventEntry {
 					txid: tx.txid(),
-					height,
+					height: conf_height,
 					event: OnchainEvent::ContentiousOutpoint { package },
 				};
 				if !self.onchain_events_awaiting_threshold_conf.contains(&entry) {
@@ -529,7 +532,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 		let onchain_events_awaiting_threshold_conf =
 			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
 		for entry in onchain_events_awaiting_threshold_conf {
-			if entry.has_reached_confirmation_threshold(height) {
+			if entry.has_reached_confirmation_threshold(cur_height) {
 				match entry.event {
 					OnchainEvent::Claim { claim_request } => {
 						// We may remove a whole set of claim outpoints here, as these one may have
@@ -552,7 +555,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 		// Check if any pending claim request must be rescheduled
 		for (first_claim_txid, ref request) in self.pending_claim_requests.iter() {
 			if let Some(h) = request.timer() {
-				if height >= h {
+				if cur_height >= h {
 					bump_candidates.insert(*first_claim_txid, (*request).clone());
 				}
 			}
@@ -561,7 +564,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 		// Build, bump and rebroadcast tx accordingly
 		log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
 		for (first_claim_txid, request) in bump_candidates.iter() {
-			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
+			if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(cur_height, &request, &*fee_estimator, &*logger) {
 				log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
 				broadcaster.broadcast_transaction(&bump_tx);
 				if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
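
As a worked example of the locktimed-package bookkeeping above (a standalone sketch with a placeholder package type, not code from the commit): `split_off(&(cur_height + 2))` keeps every package whose timelock is at least `cur_height + 2` locked for later, while everything with a timelock up to and including `cur_height + 1` becomes claimable now.

	use std::collections::BTreeMap;

	fn split_claimable(mut locktimed: BTreeMap<u32, Vec<String>>, cur_height: u32)
		-> (Vec<String>, BTreeMap<u32, Vec<String>>)
	{
		// Packages with timelock >= cur_height + 2 stay locked until a later block.
		let still_locked = locktimed.split_off(&(cur_height + 2));
		// Everything left (timelock <= cur_height + 1) can be claimed at the current height.
		let claimable = locktimed.into_iter().flat_map(|(_, pkgs)| pkgs).collect();
		(claimable, still_locked)
	}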

lightning/src/ln/functional_test_utils.rs

Lines changed: 3 additions & 0 deletions
@@ -221,6 +221,9 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
 	pub fn best_block_info(&self) -> (BlockHash, u32) {
 		self.blocks.lock().unwrap().last().map(|(a, b)| (a.block_hash(), *b)).unwrap()
 	}
+	pub fn get_block_header(&self, height: u32) -> BlockHeader {
+		self.blocks.lock().unwrap()[height as usize].0
+	}
 }

 impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
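
A standalone sketch of what the new helper does (the function and slice here are illustrative, not the test framework itself): the test node records its chain as (header, height) pairs, contiguous from height 0, so a past header can be fetched by indexing with the height. The test below uses the real helper as `nodes[1].get_block_header(conf_height)`.

	use bitcoin::blockdata::block::BlockHeader;

	fn header_at(blocks: &[(BlockHeader, u32)], height: u32) -> BlockHeader {
		// Assumes the recorded chain is contiguous from height 0, as in Node::blocks.
		blocks[height as usize].0
	}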

lightning/src/ln/functional_tests.rs

Lines changed: 83 additions & 2 deletions
@@ -12,8 +12,7 @@
 //! claim outputs on-chain.

 use chain;
-use chain::Listen;
-use chain::Watch;
+use chain::{Confirm, Listen, Watch};
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
@@ -9268,3 +9267,85 @@ fn test_invalid_funding_tx() {
 	} else { panic!(); }
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }
+
+fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(check_no_broadcast_too_early: bool) {
+	// In the first version of the chain::Confirm interface, after a refactor was made to not
+	// broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
+	// transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
+	// `best_block_updated` is at height N, and a transaction output which we wish to spend at
+	// height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
+	// spending transaction until height N+1 (or greater). This was due to the way
+	// `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
+	// spending transaction at the height the input transaction was confirmed at, not whether we
+	// should broadcast a spending transaction at the current height.
+	// A second, similar, issue involved failing HTLCs backwards - because we only provided the
+	// height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
+	// aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
+	// until we learned about an additional block.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+	nodes[1].node.force_close_channel(&channel_id).unwrap();
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(node_txn.len(), 1);
+
+	let conf_height = nodes[1].best_block_info().1;
+	if !check_no_broadcast_too_early {
+		connect_blocks(&nodes[1], 24 * 6);
+	} else {
+		// As an additional check, test that we aren't broadcasting transactions too early, either
+	}
+	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
+		&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
+	if check_no_broadcast_too_early {
+		// If we confirmed the close transaction, but timelocks have not yet expired, we should not
+		// generate any events or broadcast any transactions
+		assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+		assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	} else {
+		// We should broadcast an HTLC transaction spending our funding transaction first
+		let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(spending_txn.len(), 2);
+		assert_eq!(spending_txn[0], node_txn[0]);
+		check_spends!(spending_txn[1], node_txn[0]);
+		// We should also generate a SpendableOutputs event with the to_self output (as its
+		// timelock is up).
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);

+		// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
+		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
+		// additional block built on top of the current chain.
+		nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
+			&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		check_added_monitors!(nodes[1], 1);

+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, false);
+		expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true);
+	}
+}
+#[test]
+fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
+	do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
+	do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
+}
