Skip to content

BOLT5: fix claim backward revoked htlc success #322

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 25 additions & 2 deletions src/ln/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1873,8 +1873,9 @@ impl ChannelMonitor {
macro_rules! log_claim {
($tx_info: expr, $local_tx: expr, $htlc: expr, $source_avail: expr) => {
// We found the output in question, but aren't failing it backwards
// as we have no corresponding source. This implies either it is an
// inbound HTLC or an outbound HTLC on a revoked transaction.
// as we have no corresponding source and no valid remote commitment txid
// to attempt a weak source binding via a same-hash, same-value, still-valid offered HTLC.
// This implies either it is an inbound HTLC or an outbound HTLC on a revoked transaction.
let outbound_htlc = $local_tx == $htlc.offered;
if ($local_tx && revocation_sig_claim) ||
(outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
Expand All @@ -1891,6 +1892,21 @@ impl ChannelMonitor {
}
}

macro_rules! check_htlc_valid_remote {
($remote_txid: expr, $htlc_output: expr) => {
if let &Some(txid) = $remote_txid {
for &(ref pending_htlc, ref pending_source) in self.remote_claimable_outpoints.get(&txid).unwrap() {
if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
if let &Some(ref source) = pending_source {
payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
break;
}
}
}
}
}
}

macro_rules! scan_commitment {
($htlcs: expr, $tx_info: expr, $local_tx: expr) => {
for (ref htlc_output, source_option) in $htlcs {
Expand All @@ -1903,6 +1919,13 @@ impl ChannelMonitor {
// has timed out, or we screwed up. In any case, we should now
// resolve the source HTLC with the original sender.
payment_data = Some(((*source).clone(), htlc_output.payment_hash));
} else if !$local_tx {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note that we still want to hit the last `else` even if `!$local_tx`, as long as `payment_data` is still `None`.

log_claim!($tx_info, $local_tx, htlc_output, false);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, I think you only want to do this in the payment_data case.

if let Storage::Local { ref current_remote_commitment_txid, .. } = self.key_storage {
check_htlc_valid_remote!(current_remote_commitment_txid, htlc_output);
} else if let Storage::Local { ref prev_remote_commitment_txid, .. } = self.key_storage {
check_htlc_valid_remote!(prev_remote_commitment_txid, htlc_output);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This if block can never be hit (and should only run if the previous block failed to set payment_data).

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes — in fact I even failed to build a test that hits this, but I wasn't 100% sure it can't be hit.

}
} else {
log_claim!($tx_info, $local_tx, htlc_output, false);
continue 'outer_loop;
Expand Down
25 changes: 16 additions & 9 deletions src/ln/functional_test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -862,26 +862,33 @@ pub fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::Channe
false
} else { true }
});
assert!(res.len() == 2 || res.len() == 3);
assert!(res.len() == 2 || res.len() == 3 || res.len() == 5);
if res.len() == 3 {
assert_eq!(res[1], res[2]);
}
if res.len() == 5 {
assert_eq!(res[1], res[3]);
assert_eq!(res[2], res[4]);
}
}

assert!(node_txn.is_empty());
res
}

/// Tests that the given node has broadcast a claim transaction against the provided revoked
/// HTLC transaction.
pub fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
/// Tests that the given node has broadcast a claim transaction against one of the provided revoked
/// HTLC transactions.
pub fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_txn: Vec<Transaction>, expected: usize) {
let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn.len(), expected);
node_txn.retain(|tx| {
if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
check_spends!(tx, revoked_tx.clone());
false
} else { true }
for revoked_tx in &revoked_txn {
if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
check_spends!(tx, revoked_tx.clone());
return false
}
}
true
});
assert!(node_txn.is_empty());
}
Expand Down
116 changes: 87 additions & 29 deletions src/ln/functional_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1580,24 +1580,25 @@ fn channel_monitor_network_test() {
#[test]
fn test_justice_tx() {
// Test justice txn built on revoked HTLC-Success tx, against both sides
// Also, test we're able to extract payment preimage cleanly from revoked HTLC-Success tx and pass it backward

let nodes = create_network(2);
let nodes = create_network(3);
// Create some new channels:
let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

// A pending HTLC which will be revoked:
let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
// Get the will-be-revoked local txn from nodes[0]
let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
assert_eq!(revoked_local_txn[0].input.len(), 1);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
assert_eq!(revoked_local_txn[1].input.len(), 1);
assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
// Revoke the old state
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);

{
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
Expand All @@ -1611,56 +1612,113 @@ fn test_justice_tx() {
check_spends!(node_txn[0], revoked_local_txn[0].clone());
node_txn.swap_remove(0);
}
test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);

nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
let node_txn = test_txn_broadcast(&nodes[0], &chan_1, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
test_revoked_htlc_claim_txn_broadcast(&nodes[1], vec![node_txn[1].clone()], 1);
}
get_announce_close_broadcast_events(&nodes, 0, 1);

assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 0);

// We test justice_tx build by A on B's revoked HTLC-Success tx
// We test justice_tx built by B on C's revoked HTLC-Success tx
// Create some new channels:
let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_3 = create_announced_chan_between_nodes(&nodes, 1, 2);

// A pending HTLC which will be revoked:
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
// Get the will-be-revoked local txn from B
let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
let payment_preimage_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 3000000).0;
let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 3000000).0;
// Get the will-be-revoked local txn from C
let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
assert_eq!(revoked_local_txn[0].input.len(), 1);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
// Revoke the old state
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_3.3.txid());
assert_eq!(revoked_local_txn[0].output.len(), 3); // Only 2 HTLC and output back to B are present

// Revoke the old state - but only between B and C as we test update_fulfill_htlc to A generation from B ability to extract preimage from C's revoked HTLC-success
//claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], payment_preimage_2);
nodes[2].node.claim_funds(payment_preimage_2);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
check_added_monitors!(nodes[1], 1);
let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentSent { payment_preimage } => {
assert_eq!(payment_preimage, payment_preimage_2);
},
_ => panic!("Unexpected event"),
}

nodes[2].node.claim_funds(payment_preimage_3);
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
{
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3);
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4);
assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
assert_eq!(node_txn[0].input.len(), 2); // We claim the received HTLC output

check_spends!(node_txn[0], revoked_local_txn[0].clone());
check_spends!(node_txn[1], chan_3.3.clone()); // Local commitment tx
check_spends!(node_txn[2], node_txn[1].clone()); // HTLC-Timeout tx on local
node_txn.swap_remove(0);
}
test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
test_txn_broadcast(&nodes[1], &chan_3, None, HTLCType::TIMEOUT);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
}

nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
let events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone());
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()] }, 1);
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ugh, I really hate this test - at this point nodes[1] has a pending fail_backwards of the HTLC and it's just waiting on a future process_pending_htlc_forwards() call. We rely on claims bypassing the pending_forwards stuff to skip the fail and actually fulfill. At some point I'd kinda like to batch claims in addition to fails, which would break this test. I'd kinda prefer we just leave the test change until #305 lands.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Got your point, agree to wait on this

test_revoked_htlc_claim_txn_broadcast(&nodes[1], vec![node_txn[1].clone(), node_txn[2].clone()], 2);
let updates_3 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
check_added_monitors!(nodes[1], 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_3.update_fulfill_htlcs[0]).unwrap();
commitment_signed_dance!(nodes[0], nodes[1], updates_3.commitment_signed, false);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentSent { payment_preimage } => {
assert_eq!(payment_preimage, payment_preimage_3);
},
_ => panic!("Unexpected event"),
}
}
get_announce_close_broadcast_events(&nodes, 0, 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[0].node.list_channels().len(), 1);
assert_eq!(nodes[1].node.list_channels().len(), 1);
assert_eq!(nodes[2].node.list_channels().len(), 0);
}

#[test]
Expand Down