Misc Tweaks #253

Merged · 3 commits · Nov 16, 2018
24 changes: 19 additions & 5 deletions src/ln/channel.rs
@@ -1122,8 +1122,9 @@ impl Channel {
Ok(our_sig)
}

/// May return an IgnoreError, but should not, and will always return Ok(_) when
/// debug_assertions are turned on
/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
/// Ok(_) if debug assertions are turned on and preconditions are met.
fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: [u8; 32]) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitor>), HandleError> {
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
@@ -1175,7 +1176,9 @@ impl Channel {
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
if htlc_id_arg == htlc_id {
debug_assert!(false, "Tried to fulfill an HTLC we already had a holding-cell failure on");
return Err(HandleError{err: "Unable to find a pending HTLC which matched the given HTLC ID", action: Some(msgs::ErrorAction::IgnoreError)});
// Return the new channel monitor in a last-ditch effort to hit the
// chain and claim the funds
return Ok((None, Some(self.channel_monitor.clone())));
}
},
_ => {}
@@ -1215,8 +1218,9 @@ impl Channel {
}
}

/// May return an IgnoreError, but should not, and will always return Ok(_) when
/// debug_assertions are turned on
/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
/// Ok(_) if debug assertions are turned on and preconditions are met.
pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, HandleError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -2823,6 +2827,16 @@ impl Channel {
self.channel_update_count += 1;
return Err(HandleError{err: "funding tx had wrong script/value", action: Some(ErrorAction::DisconnectPeer{msg: None})});
} else {
if self.channel_outbound {
for input in tx.input.iter() {
if input.witness.is_empty() {
// We generated a malleable funding transaction, implying we've
// just exposed ourselves to funds loss to our counterparty.
#[cfg(not(feature = "fuzztarget"))]
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
}
}
self.funding_tx_confirmations = 1;
self.short_channel_id = Some(((height as u64) << (5*8)) |
((*index_in_block as u64) << (2*8)) |
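Not part of the diff above, but useful context for the two behaviours it touches: the witness check treats a funding transaction with any witness-less input as malleable (its txid can still change after signing, which would invalidate the pre-signed commitment transaction), and the short_channel_id line is the standard BOLT 7 packing of block height, transaction index, and output index. A standalone sketch, with made-up names:

```rust
/// Illustrative only, not code from this PR.
///
/// A funding transaction is only safe to pre-sign against if every input carries a
/// witness: an empty witness means the signatures live in scriptSig, which is covered
/// by the txid, so the txid can still change after signing and the pre-signed
/// commitment transaction would no longer spend the funding output.
fn funding_tx_is_safe(input_witnesses: &[Vec<Vec<u8>>]) -> bool {
    input_witnesses.iter().all(|witness| !witness.is_empty())
}

/// BOLT 7 short_channel_id packing, as in the hunk above:
/// 3 bytes of block height | 3 bytes of transaction index in the block | 2 bytes of output index.
fn short_channel_id(height: u32, index_in_block: u32, output_index: u16) -> u64 {
    ((height as u64) << (5 * 8)) | ((index_in_block as u64) << (2 * 8)) | (output_index as u64)
}

fn unpack_short_channel_id(scid: u64) -> (u32, u32, u16) {
    (
        (scid >> 40) as u32,
        ((scid >> 16) & 0x00ff_ffff) as u32,
        (scid & 0xffff) as u16,
    )
}

#[cfg(test)]
mod scid_tests {
    use super::*;

    #[test]
    fn round_trip() {
        let scid = short_channel_id(539_571, 1_312, 1);
        assert_eq!(unpack_short_channel_id(scid), (539_571, 1_312, 1));
        // A transaction with a legacy (witness-less) input is rejected.
        assert!(!funding_tx_is_safe(&[vec![], vec![vec![0u8; 72]]]));
    }
}
```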
64 changes: 36 additions & 28 deletions src/ln/channelmanager.rs
@@ -6212,7 +6212,7 @@ mod tests {

/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
/// for claims/fails they are separated out.
fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
node_a.node.peer_connected(&node_b.node.get_our_node_id());
let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
node_b.node.peer_connected(&node_a.node.get_our_node_id());
@@ -6245,7 +6245,7 @@ mod tests {
(pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));

for chan_msgs in resp_1.drain(..) {
if pre_all_htlcs {
if send_funding_locked.0 {
node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
let announcement_event = node_a.node.get_and_clear_pending_msg_events();
if !announcement_event.is_empty() {
@@ -6302,7 +6302,7 @@ mod tests {
}

for chan_msgs in resp_2.drain(..) {
if pre_all_htlcs {
if send_funding_locked.1 {
node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
let announcement_event = node_b.node.get_and_clear_pending_msg_events();
if !announcement_event.is_empty() {
@@ -6366,7 +6366,7 @@ mod tests {

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -6375,7 +6375,7 @@

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -6388,7 +6388,7 @@
claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);

reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
{
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
@@ -6469,19 +6469,19 @@ mod tests {
if messages_delivered < 3 {
// Even if the funding_locked messages get exchanged, as long as nothing further was
// received on either side, both sides will need to resend them.
reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
} else if messages_delivered == 6 {
// Everything was delivered...
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}

let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -6493,7 +6493,7 @@ mod tests {

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
nodes[1].node.process_pending_htlc_forwards();
@@ -6567,7 +6567,7 @@ mod tests {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
if messages_delivered < 2 {
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
//TODO: Deduplicate PaymentSent events, then enable this if:
//if messages_delivered < 1 {
let events_4 = nodes[0].node.get_and_clear_pending_events();
@@ -6581,21 +6581,21 @@ mod tests {
//}
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
} else if messages_delivered == 5 {
// Everything was delivered...
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

// Channel should still work fine...
let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
@@ -6636,20 +6636,28 @@ mod tests {
_ => panic!("Unexpected event"),
}

reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
Review comment:

What kind of bug was it? (for the record, in case one of us runs into it again)

Collaborator (PR author):

I saw an issue running rust-lightning-bitcoincorerpc: opening an outbound channel, then restarting after the remote side had sent its funding_locked but we had not yet sent ours.
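
For context (this is not code from the PR, and the names below are hypothetical): the reconnect behaviour exercised here matches the BOLT 2 retransmission rule that funding_locked is (re)sent during channel_reestablish when neither side has advanced past the initial commitment. A minimal sketch of that decision:

```rust
/// Minimal sketch, not rust-lightning's API: whether to (re)send funding_locked while
/// handling channel_reestablish. Per BOLT 2, a node retransmits funding_locked when
/// next_local_commitment_number is 1 in both the reestablish it sent and the one it
/// received, i.e. neither side has signed a commitment beyond the initial one.
fn should_send_funding_locked(
    funding_confirmed: bool,           // hypothetical flag: we consider the funding tx confirmed
    our_next_commitment_number: u64,   // value we put in our channel_reestablish
    their_next_commitment_number: u64, // value the peer put in theirs
) -> bool {
    funding_confirmed
        && our_next_commitment_number == 1
        && their_next_commitment_number == 1
}
```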


nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
assert_eq!(events_2.len(), 2);
match events_2[0] {
MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
match events_2[1] {
MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
}

reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

// TODO: We shouldn't need to manually pass list_usable_channels here once we support
// rebroadcasting announcement_signatures upon reconnect.
@@ -6852,7 +6860,7 @@ mod tests {
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}

*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
@@ -6893,7 +6901,7 @@ mod tests {
if disconnect {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}

// ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
@@ -7453,7 +7461,7 @@ mod tests {
nodes[0].node = Arc::new(nodes_0_deserialized);
check_added_monitors!(nodes[0], 1);

reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -7523,8 +7531,8 @@ mod tests {
nodes[0].node = Arc::new(nodes_0_deserialized);

// nodes[1] and nodes[2] have no lost state with nodes[0]...
reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[2], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
//... and we can even still claim the payment!
claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);

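For reference when reading the call sites above (this struct is a descriptive stand-in, not rust-lightning code): the updated reconnect_nodes arguments can be read roughly as follows, each tuple giving the expectation for node_a and node_b respectively.

```rust
/// Descriptive stand-in for reconnect_nodes' parameters; illustrative only.
/// Each tuple is (expectation for node_a, expectation for node_b).
struct ReconnectExpectations {
    /// Whether each side should handle a (re)sent funding_locked on reconnect.
    send_funding_locked: (bool, bool),
    /// update_add_htlcs expected to be retransmitted; per the helper's doc comment this
    /// counts both holding-cell and in-flight adds.
    pending_htlc_adds: (i64, i64),
    /// Claims expected to be retransmitted in flight.
    pending_htlc_claims: (usize, usize),
    /// Claims still sitting in the holding cell.
    pending_cell_htlc_claims: (usize, usize),
    /// Fails still sitting in the holding cell.
    pending_cell_htlc_fails: (usize, usize),
    /// Whether each side still owes the peer a revoke_and_ack.
    pending_raa: (bool, bool),
}
```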