diff --git a/fuzz/fuzz_targets/router_target.rs b/fuzz/fuzz_targets/router_target.rs index 52f9a235fe7..975cfe1ec3b 100644 --- a/fuzz/fuzz_targets/router_target.rs +++ b/fuzz/fuzz_targets/router_target.rs @@ -187,7 +187,7 @@ pub fn do_test(data: &[u8]) { }, 1 => { let short_channel_id = slice_to_be64(get_slice!(8)); - router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed {short_channel_id}); + router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed {short_channel_id, is_permanent: false}); }, _ => return, } @@ -223,6 +223,7 @@ pub fn do_test(data: &[u8]) { fee_proportional_millionths: slice_to_be32(get_slice!(4)), cltv_expiry_delta: slice_to_be16(get_slice!(2)), htlc_minimum_msat: slice_to_be64(get_slice!(8)), + last_update: 0, }); } &last_hops_vec[..] diff --git a/src/ln/channel.rs b/src/ln/channel.rs index 1b7db97d3d3..464bfc6e303 100644 --- a/src/ln/channel.rs +++ b/src/ln/channel.rs @@ -1545,7 +1545,7 @@ impl Channel { Ok(()) } - /// Removes an outbound HTLC which has been commitment_signed by the remote end + /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed #[inline] fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>, fail_reason: Option) -> Result<&HTLCSource, ChannelError> { for htlc in self.pending_outbound_htlcs.iter_mut() { @@ -1559,13 +1559,13 @@ impl Channel { }; match htlc.state { OutboundHTLCState::LocalAnnounced(_) => - return Err(ChannelError::Close("Remote tried to fulfill HTLC before it had been committed")), + return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC before it had been committed")), OutboundHTLCState::Committed => { htlc.state = OutboundHTLCState::RemoteRemoved; htlc.fail_reason = fail_reason; }, OutboundHTLCState::AwaitingRemoteRevokeToRemove | OutboundHTLCState::AwaitingRemovedRemoteRevoke | OutboundHTLCState::RemoteRemoved => - return Err(ChannelError::Close("Remote tried to fulfill HTLC that they'd already fulfilled")), + return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC that they'd already fulfilled/failed")), } return Ok(&htlc.source); } @@ -2450,6 +2450,11 @@ impl Channel { self.our_htlc_minimum_msat } + /// Allowed in any state (including after shutdown) + pub fn get_their_htlc_minimum_msat(&self) -> u64 { + self.our_htlc_minimum_msat + } + pub fn get_value_satoshis(&self) -> u64 { self.channel_value_satoshis } diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs index c8ed743e1d0..a0b160cc64b 100644 --- a/src/ln/channelmanager.rs +++ b/src/ln/channelmanager.rs @@ -23,7 +23,7 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError, ChannelKeys}; -use ln::channelmonitor::ManyChannelMonitor; +use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; use ln::router::{Route,RouteHop}; use ln::msgs; use ln::msgs::{HandleError,ChannelMessageHandler}; @@ -98,6 +98,14 @@ mod channel_held_info { pub(super) incoming_packet_shared_secret: SharedSecret, } + /// First hop payment data. 
This is used to reconstruct amt_to_forward + /// and outgoing_cltv_value for each hop combined with data from RouteHope + #[derive(Clone)] + pub struct PaymentData { + pub(super) htlc_msat: u64, + pub(super) block_height: u32, + } + /// Tracks the inbound corresponding to an outbound HTLC #[derive(Clone)] pub enum HTLCSource { @@ -105,6 +113,7 @@ mod channel_held_info { OutboundRoute { route: Route, session_priv: SecretKey, + payment_data: PaymentData, }, } #[cfg(test)] @@ -113,6 +122,7 @@ mod channel_held_info { HTLCSource::OutboundRoute { route: Route { hops: Vec::new() }, session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(), + payment_data: PaymentData { htlc_msat: 0, block_height: 0 }, } } } @@ -290,8 +300,29 @@ pub struct ChannelManager { logger: Arc, } +/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound +/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER, +/// ie the node we forwarded the payment on to should always have enough room to reliably time out +/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the +/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more). const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO? +// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that +// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have +// HTLC_FAIL_TIMEOUT_BLOCKS left to fail it backwards ourselves before hitting the +// CLTV_CLAIM_BUFFER point and failing the channel on-chain to time out the HTLC. +#[deny(const_err)] +#[allow(dead_code)] +const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER; + +// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See +// ChannelMontior::would_broadcast_at_height for a description of why this is needed. +#[deny(const_err)] +#[allow(dead_code)] +const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER; + +const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO? + macro_rules! secp_call { ( $res: expr, $err: expr ) => { match $res { @@ -645,8 +676,7 @@ impl ChannelManager { Ok(res) } - /// returns the hop data, as well as the first-hop value_msat and CLTV value we should send. - fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec, u64, u32), APIError> { + fn build_onion_payloads_with_realm(route: &Route, starting_htlc_offset: u32, realm: u8) -> Result<(Vec, u64, u32), APIError> { let mut cur_value_msat = 0u64; let mut cur_cltv = starting_htlc_offset; let mut last_short_channel_id = 0; @@ -661,7 +691,7 @@ impl ChannelManager { let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat }; let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv }; res[idx] = msgs::OnionHopData { - realm: 0, + realm: realm, data: msgs::OnionRealm0HopData { short_channel_id: last_short_channel_id, amt_to_forward: value_msat, @@ -682,6 +712,11 @@ impl ChannelManager { Ok((res, cur_value_msat, cur_cltv)) } + /// returns the hop data, as well as the first-hop value_msat and CLTV value we should send. 
+ fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) -> Result<(Vec, u64, u32), APIError> { + ChannelManager::build_onion_payloads_with_realm(route, starting_htlc_offset, 0) + } + #[inline] fn shift_arr_right(arr: &mut [u8; 20*65]) { unsafe { @@ -877,16 +912,24 @@ impl ChannelManager { } }; - //TODO: Check that msg.cltv_expiry is within acceptable bounds! - let pending_forward_info = if next_hop_data.hmac == [0; 32] { // OUR PAYMENT! - if next_hop_data.data.amt_to_forward != msg.amount_msat { - return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); + // TODO: incorrect_payment_amount + //if expected_payment < next_hop_data.amt_to_forward || expected_payment > 2*next_hop_data.amt_to_forward { + // return_err!("Payment is incorrect for the payment hash", 0x4000|16, &[0;0]); + //} + // final_expiry_too_soon + if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 { + return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); } + // final_incorrect_cltv_expiry if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry { return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } + // final_incorrect_htlc_amount + if next_hop_data.data.amt_to_forward > msg.amount_msat { + return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); + } // Note that we could obviously respond immediately with an update_fulfill_htlc // message, however that would leak that we are the recipient of this payment, so @@ -948,29 +991,49 @@ impl ChannelManager { if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); let forwarding_id = match id_option { - None => { + None => { // unknown_next_peer return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]); }, Some(id) => id.clone(), }; - if let Some((err, code, chan_update)) = { + if let Some((err, code, chan_update)) = loop { let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); - if !chan.is_live() { - Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap())) - } else { - let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); - if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { - Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap())) - } else { - if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { - Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap())) - } else { - None - } - } + + if !chan.is_live() { // temporary_channel_failure + break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap()))); + } + if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum + break Some(("HTLC amount was below the 
htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap()))); + } + let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) }); + if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient + break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap()))); + } + if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry + break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap()))); + } + let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; + // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration + if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon + break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); + } + if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far + break Some(("CLTV expiry is too far in the future", 21, None)); } - } { - return_err!(err, code, &chan_update.encode_with_len()[..]); + break None; + } + { + let mut res = Vec::with_capacity(8 + 128); + if code == 0x1000 | 11 || code == 0x1000 | 12 { + res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat)); + } + else if code == 0x1000 | 13 { + res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry)); + } + if let Some(chan_update) = chan_update { + res.extend_from_slice(&chan_update.encode_with_len()[..]); + } + return_err!(err, code, &res[..]); } } } @@ -1035,11 +1098,15 @@ impl ChannelManager { } } - let session_priv = SecretKey::from_slice(&self.secp_ctx, &{ - let mut session_key = [0; 32]; - rng::fill_bytes(&mut session_key); - session_key - }).expect("RNG is bad!"); + let session_priv = if cfg!(test) { + SecretKey::from_slice(&self.secp_ctx, &[3; 32]).unwrap() + } else { + SecretKey::from_slice(&self.secp_ctx, &{ + let mut session_key = [0; 32]; + rng::fill_bytes(&mut session_key); + session_key + }).expect("RNG is bad!") + }; let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; @@ -1068,6 +1135,10 @@ impl ChannelManager { chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { route: route.clone(), session_priv: session_priv.clone(), + payment_data: PaymentData { + htlc_msat: htlc_msat, + block_height: cur_height, + } }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})? }; @@ -1328,12 +1399,25 @@ impl ChannelManager { /// still-available channels. fn fail_htlc_backwards_internal(&self, mut channel_state: MutexGuard, source: HTLCSource, payment_hash: &[u8; 32], onion_error: HTLCFailReason) { match source { - HTLCSource::OutboundRoute { .. 
} => { + HTLCSource::OutboundRoute { ref route, ref session_priv, ref payment_data } => { mem::drop(channel_state); + let data = if let HTLCFailReason::ErrorPacket { ref err } = onion_error{ + err.data.clone() + } else { + panic!("should have onion error packet here"); + }; + let (channel_update, payment_retryable, error_code) = self.process_onion_failure(route, data, session_priv, payment_data).unwrap(); //TODO: why unwrap here? let mut pending_events = self.pending_events.lock().unwrap(); + if let Some(channel_update) = channel_update { + pending_events.push(events::Event::RouteUpdate { + update: channel_update, + }); + } pending_events.push(events::Event::PaymentFailed { - payment_hash: payment_hash.clone() + payment_hash: payment_hash.clone(), + retryable: payment_retryable, + error_code: error_code, }); }, HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => { @@ -1350,7 +1434,7 @@ impl ChannelManager { let (node_id, fail_msgs) = { let chan_id = match channel_state.short_to_id.get(&short_channel_id) { Some(chan_id) => chan_id.clone(), - None => return + None => return, }; let chan = channel_state.by_id.get_mut(&chan_id).unwrap(); @@ -1387,7 +1471,7 @@ impl ChannelManager { None => {}, } }, - } + }; } /// Provides a payment preimage in response to a PaymentReceived event, returning true and @@ -1748,9 +1832,225 @@ impl ChannelManager { Ok(()) } - fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, MsgHandleErrInternal> { + // Process onion peacket processed in only in the origin node. Returns update + // for router and boolean flag indicating if payment can be retried + fn process_onion_failure(&self, route: &Route, mut packet_decrypted: Vec, session_priv: &SecretKey, payment_data: &PaymentData) -> Result<(Option, bool, Option), secp256k1::Error> { + + macro_rules! 
onion_failure_log { + ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => { + log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value); + }; + ( $error_code_textual: expr, $error_code: expr ) => { + log_trace!(self, "{}({})", $error_code_textual, $error_code); + }; + } + + const BADONION: u16 = 0x8000; + const PERM: u16 = 0x4000; + const UPDATE: u16 = 0x1000; + + let mut res = None; + let mut htlc_msat = payment_data.htlc_msat; + let mut outgoing_cltv_value = payment_data.block_height; + + // Handle packed channel/node updates for passing back for the route handler + Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| { + if res.is_some() { return; } + + let incoming_htlc_msat = htlc_msat; + let amt_to_forward = htlc_msat - route_hop.fee_msat; + htlc_msat = amt_to_forward; + outgoing_cltv_value += route_hop.cltv_expiry_delta; + + let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); + + let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); + decryption_tmp.resize(packet_decrypted.len(), 0); + let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); + chacha.process(&packet_decrypted, &mut decryption_tmp[..]); + packet_decrypted = decryption_tmp; + + let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey; + + match msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { + Err(_e) => { + //TODO: error case + return; + }, + Ok(ref err_packet) if err_packet.failuremsg.len() < 2 => { + //TODO: no errorcode? + return; + }, + Ok(ref err_packet) if err_packet.failuremsg.len() >= 2 => { + let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); + + let mut hmac = Hmac::new(Sha256::new(), &um); + hmac.input(&err_packet.encode()[32..]); + let mut calc_tag = [0u8; 32]; + hmac.raw_result(&mut calc_tag); + + if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { + let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]); + + match error_code & 0xff { + 1|2|3 => { + // either from an intermediate or final node + // invalid_realm(PERM|1), + // temporary_node_failure(NODE|2) + // permanent_node_failure(PERM|NODE|2) + // required_node_feature_mssing(PERM|NODE|3) + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: error_code & PERM == PERM, + }), !(error_code & PERM == PERM && is_from_final_node), Some(error_code))); + // node returning invalid_realm is removed from network_map, + // although NODE flag is not set, TODO: or remove channel only? + // retry payment when removed node is not a final node + return; + }, + _ => {} + } + + if is_from_final_node { + let payment_retryable = match error_code { + c if c == PERM|15 => false, // unknown_payment_hash + c if c == PERM|16 => false, // incorrect_payment_amount + 17 => true, // final_expiry_too_soon + 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry + let _reported_cltv_expiry = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+4]); + true + }, + 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount + let _reported_incoming_htlc_msat = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+8]); + true + }, + _ => { + // A final node has sent us either an invalid code or an error_code that + // MUST be sent from the processing node, or the formmat of failuremsg + // does not coform to the spec. 
+ // Remove it from the network map and don't may retry payment + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false, None)); + return; + } + }; + debug_assert_eq!(payment_retryable, error_code&PERM != PERM); + res = Some((None, payment_retryable, Some(error_code))); + return; + } + + // now, error_code should be only from the intermediate nodes + match error_code { + _c if error_code & PERM == PERM => { + match error_code { + c if c == BADONION|PERM|4 => {}, // invalid_onion_version + c if c == BADONION|PERM|5 => {}, // invalid_onion_hmac + c if c == BADONION|PERM|6 => {}, // invalid_onion_key + _ => { + //TODO: bogus error code handling + }, + } + res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed { + short_channel_id: route_hop.short_channel_id, + is_permanent: true, + }), true, Some(error_code))); + return; + }, + _c if error_code & UPDATE == UPDATE => { + let offset = match error_code { + c if c == UPDATE|7 => 0, // temporary_channel_failure + c if c == UPDATE|11 => 8, // amount_below_minimum + c if c == UPDATE|12 => 8, // fee_insufficient + c if c == UPDATE|13 => 4, // incorrect_cltv_expiry + c if c == UPDATE|14 => 0, // expiry_too_soon + c if c == UPDATE|20 => 2, // channel_disabled + _ => { + // node sending unknown code + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false, Some(error_code))); + return; + } + }; + + if err_packet.failuremsg.len() >= offset + 2 { + let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize; + if err_packet.failuremsg.len() >= offset + 2 + update_len { + if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset+4..offset+4+update_len])) { + + // check if channel_update is newer than the one used + // when calculating the route + if chan_update.contents.timestamp <= route_hop.channel_update_timestamp { + res = Some((None, true, Some(error_code))); + return; + } + // if channel_update should NOT have caused the failure: + // MAY treat the channel_update as invalid. 
+ let is_chan_update_invalid = match error_code { + c if c == UPDATE|11 => { // amount_below_minimum + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[..offset]); + onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat); + incoming_htlc_msat > chan_update.contents.htlc_minimum_msat + }, + c if c == UPDATE|12 => { // fee_insufficient + let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[..offset]); + let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) }); + onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat); + new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap() + } + c if c == UPDATE|13 => { // incorrect_cltv_expiry + eprintln!("offset({})", offset); + let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[..offset]); + onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry); + route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta + }, + c if c == UPDATE|20 => { // channel_disabled + let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[..offset]); + onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags); + chan_update.contents.flags & 0x01 == 0x01 + }, + c if c == UPDATE|21 => true, // expiry_too_far + _ => { unreachable!(); }, + }; + + let msg = if is_chan_update_invalid { None } else { + Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { + msg: chan_update, + }) + }; + res = Some((msg, true, Some(error_code))); + return; + } + } + } + }, + 21 => { // expiry_too_far + res = Some((None, true, Some(error_code))); + return; + } + _ => { + // node sending unknown code + res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure { + node_id: route_hop.pubkey, + is_permanent: true, + }), false, None)); + return; + } + } + } + }, + _ => { unreachable!() } + } + })?; + Ok(res.unwrap()) + } + + fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_state = self.channel_state.lock().unwrap(); - let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) { + match channel_state.by_id.get_mut(&msg.channel_id) { Some(chan) => { if chan.get_their_node_id() != *their_node_id { //TODO: here and below MsgHandleErrInternal, #153 case @@ -1761,64 +2061,7 @@ impl ChannelManager { }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id)) }?; - - match htlc_source { - &HTLCSource::OutboundRoute { ref route, ref session_priv, .. 
} => { - // Handle packed channel/node updates for passing back for the route handler - let mut packet_decrypted = msg.reason.data.clone(); - let mut res = None; - Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| { - if res.is_some() { return; } - - let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret); - - let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len()); - decryption_tmp.resize(packet_decrypted.len(), 0); - let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]); - chacha.process(&packet_decrypted, &mut decryption_tmp[..]); - packet_decrypted = decryption_tmp; - - if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) { - if err_packet.failuremsg.len() >= 2 { - let um = ChannelManager::gen_um_from_shared_secret(&shared_secret); - - let mut hmac = Hmac::new(Sha256::new(), &um); - hmac.input(&err_packet.encode()[32..]); - let mut calc_tag = [0u8; 32]; - hmac.raw_result(&mut calc_tag); - if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) { - const UNKNOWN_CHAN: u16 = 0x4000|10; - const TEMP_CHAN_FAILURE: u16 = 0x4000|7; - match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) { - TEMP_CHAN_FAILURE => { - if err_packet.failuremsg.len() >= 4 { - let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize; - if err_packet.failuremsg.len() >= 4 + update_len { - if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[4..4 + update_len])) { - res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { - msg: chan_update, - }); - } - } - } - }, - UNKNOWN_CHAN => { - // No such next-hop. We know this came from the - // current node as the HMAC validated. - res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed { - short_channel_id: route_hop.short_channel_id - }); - }, - _ => {}, //TODO: Enumerate all of these! 
- } - } - } - } - }).unwrap(); - Ok(res) - }, - _ => { Ok(None) }, - } + Ok(()) } fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { @@ -2223,7 +2466,7 @@ impl ChannelMessageHandler for ChannelManager { handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id) } - fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result, HandleError> { + fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> { handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id) } @@ -2351,11 +2594,12 @@ mod tests { use chain::chaininterface; use chain::transaction::OutPoint; use chain::chaininterface::ChainListener; - use ln::channelmanager::{ChannelManager,OnionKeys}; + use ln::channelmanager::{ChannelManager,OnionKeys,HTLCSource}; + use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS}; use ln::router::{Route, RouteHop, Router}; use ln::msgs; - use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; - use util::test_utils; + use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate}; + use util::{test_utils, rng}; use util::events::{Event, EventsProvider}; use util::errors::APIError; use util::logger::Logger; @@ -2384,6 +2628,7 @@ mod tests { use std::default::Default; use std::rc::Rc; use std::sync::{Arc, Mutex}; + use std::sync::atomic::Ordering; use std::time::Instant; use std::mem; @@ -2395,23 +2640,23 @@ mod tests { hops: vec!( RouteHop { pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(), - short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually + short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, channel_update_timestamp: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually }, RouteHop { pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(), - short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually + short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, channel_update_timestamp: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually }, RouteHop { pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(), - short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually + short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, channel_update_timestamp: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually }, RouteHop { pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]).unwrap(), - short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually + short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, channel_update_timestamp: 0 // Test vectors are garbage and not 
generateble from a RouteHop, we fill in payloads manually }, RouteHop { pubkey: PublicKey::from_slice(&secp_ctx, &hex::decode("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()[..]).unwrap(), - short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually + short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, channel_update_timestamp: 0 // Test vectors are garbage and not generateble from a RouteHop, we fill in payloads manually }, ), }; @@ -3093,7 +3338,7 @@ mod tests { let events = origin_node.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash } => { + Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, our_payment_hash); }, _ => panic!("Unexpected event"), @@ -3700,19 +3945,22 @@ mod tests { pubkey: nodes[2].node.get_our_node_id(), short_channel_id: chan_2.0.contents.short_channel_id, fee_msat: 0, - cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32 + cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32, + channel_update_timestamp: 0, }); hops.push(RouteHop { pubkey: nodes[3].node.get_our_node_id(), short_channel_id: chan_3.0.contents.short_channel_id, fee_msat: 0, - cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32 + cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32, + channel_update_timestamp: 0, }); hops.push(RouteHop { pubkey: nodes[1].node.get_our_node_id(), short_channel_id: chan_4.0.contents.short_channel_id, fee_msat: 1000000, cltv_expiry_delta: TEST_FINAL_CLTV, + channel_update_timestamp: 0, }); hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; @@ -3723,19 +3971,22 @@ mod tests { pubkey: nodes[3].node.get_our_node_id(), short_channel_id: chan_4.0.contents.short_channel_id, fee_msat: 0, - cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32 + cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32, + channel_update_timestamp: 0, }); hops.push(RouteHop { pubkey: nodes[2].node.get_our_node_id(), short_channel_id: chan_3.0.contents.short_channel_id, fee_msat: 0, - cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32 + cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32, + channel_update_timestamp: 0, }); hops.push(RouteHop { pubkey: nodes[1].node.get_our_node_id(), short_channel_id: chan_2.0.contents.short_channel_id, fee_msat: 1000000, cltv_expiry_delta: TEST_FINAL_CLTV, + channel_update_timestamp: 0, }); hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; @@ -4216,6 +4467,9 @@ mod tests { assert_eq!(nodes[0].node.list_channels().len(), 0); assert_eq!(nodes[1].node.list_channels().len(), 1); + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1); + // One pending HTLC is discarded by the force-close: let payment_preimage_1 
= route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0; @@ -4269,13 +4523,17 @@ mod tests { assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); + assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1); + assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1); // One pending HTLC to time out: let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0; + // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for + // buffer space). { let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[3].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]); - for i in 2..TEST_FINAL_CLTV - 3 { + nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]); + for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]); } @@ -4286,8 +4544,8 @@ mod tests { claim_funds!(nodes[4], nodes[3], payment_preimage_2); header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - nodes[4].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]); - for i in 2..TEST_FINAL_CLTV - 3 { + nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]); + for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 { header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]); } @@ -4852,7 +5110,7 @@ mod tests { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash } => { + Event::PaymentFailed { payment_hash, .. 
} => { assert_eq!(payment_hash, payment_hash_5); }, _ => panic!("Unexpected event"), @@ -5115,7 +5373,7 @@ mod tests { let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap(); let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap(); - let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } ); + let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key); let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key); @@ -5162,7 +5420,7 @@ mod tests { let unsigned_msg = dummy_unsigned_msg!(); sign_msg!(unsigned_msg); assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true); - let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } ); + let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); // Configured with Network::Testnet let mut unsigned_msg = dummy_unsigned_msg!(); @@ -5175,4 +5433,249 @@ mod tests { sign_msg!(unsigned_msg); assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err()); } + + fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, channels: &[(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction)], route: &Route, payment_hash: &[u8; 32], mut callback1: F1, mut callback2: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option) + where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), + F2: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), + { + use ln::msgs::HTLCFailChannelUpdate; + + // reset block height + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + for ix in 0..nodes.len() { + nodes[ix].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]); + } + + macro_rules! expect_update_htlc_event { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::UpdateHTLCs { node_id:_, ref updates } => { }, + _ => panic!("Unexpected event"), + }; + $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); + $node.node.process_pending_htlc_forwards(); + }} + } + + macro_rules! expect_pending_htlcs_forwardable { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); + $node.node.process_pending_htlc_forwards(); + }} + }; + + macro_rules! expect_pending_htlcs_forwardable { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. 
} => { }, + _ => panic!("Unexpected event"), + }; + $node.node.channel_state.lock().unwrap().next_forward = Instant::now(); + $node.node.process_pending_htlc_forwards(); + }} + }; + + macro_rules! expect_forward_event { + ($node: expr) => {{ + let mut events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + check_added_monitors!($node, 1); + SendEvent::from_event(events.remove(0)) + }} + }; + + macro_rules! expect_forward_event { + ($node: expr) => {{ + let mut events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + check_added_monitors!($node, 1); + SendEvent::from_event(events.remove(0)) + }} + }; + + macro_rules! expect_fail_backward_event { + ($node: expr) => {{ + let events = $node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, ref update_fail_htlcs, update_fail_malformed_htlcs:_, update_fee:_, ref commitment_signed } } => { + assert_eq!(update_fail_htlcs.len(),1); + (update_fail_htlcs[0].clone(), commitment_signed.clone()) + }, + _ => panic!("Unexpected event type!"), + } + }} + }; + + nodes[0].node.send_payment(route.clone(), payment_hash.clone()).unwrap(); + let payment_event = expect_forward_event!(nodes[0]); + let mut update_add_1 = payment_event.msgs[0].clone(); + + callback1(&mut update_add_1); + + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_1).unwrap(); + + // 0 => 1 + let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[1], 1); + assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + check_added_monitors!(nodes[0], 1); + let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); + assert!(bs_none.is_none()); + check_added_monitors!(nodes[0], 1); + let commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); + check_added_monitors!(nodes[1], 1); + + let (update_fail_htlc, commitment_signed) = match test_case { + 0 => { // intermediat node failure + assert!(commitment_update.is_some()); + let commitment_update = commitment_update.unwrap(); + assert!(commitment_update.update_fail_htlcs.len() == 1); + (commitment_update.update_fail_htlcs[0].clone(), commitment_update.commitment_signed) + }, + 1 => { // final node failure + assert!(commitment_update.is_none()); + expect_pending_htlcs_forwardable!(&nodes[1]); + + let ref payment_event = expect_forward_event!(nodes[1]); + let mut update_add_2 = payment_event.msgs[0].clone(); + + callback2(&mut update_add_2); + + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_2).unwrap(); + let (as_revoke_and_ack, as_commitment_signed) = nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); + check_added_monitors!(nodes[2], 1); + assert!(nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none()); + + check_added_monitors!(nodes[1], 1); + let (bs_revoke_and_ack, bs_none) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap(); + 
assert!(bs_none.is_none()); + check_added_monitors!(nodes[1], 1); + let commitment_update = nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap(); + check_added_monitors!(nodes[2], 1); + + assert!(commitment_update.update_fail_htlcs.len() == 1); + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &commitment_update.update_fail_htlcs[0]).unwrap(); + commitment_signed_dance!(nodes[1], nodes[2], commitment_update.commitment_signed, true); + + let (update_fail_htlc_msg, commitment_signed) = expect_fail_backward_event!(nodes[1]); + (update_fail_htlc_msg, commitment_signed) + }, + _ => unreachable!(), + }; + + // origin node fail handling + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc).unwrap(); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + + let events = nodes[0].node.get_and_clear_pending_events(); + //TODO assert_eq!(events.len(), 2); + for evt in events.iter() { + match evt { + &Event::PaymentFailed { payment_hash:_, ref retryable, ref error_code} => { + assert_eq!(*retryable, expected_retryable); + assert_eq!(*error_code, expected_error_code); + }, + &Event::RouteUpdate { ref update } => { + /* TODO check expected channel update */ + /* + expected_channel_update.unwrap() + assert!(expected_channel_update.is_some()); + if let Some(expected_update) = expected_channel_update { + assert!(fail_channel_update.is_some()); + match fail_channel_update.unwrap() { + expected_update => {}, + _ => panic!("Unexpected channel update"), + } + } + */ + }, + _ => panic!("Unexpected event"), + }; + } + } + + #[test] + fn test_onion_failure() { + use ln::msgs::ChannelUpdate; + use ln::channelmanager::CLTV_FAR_FAR_AWAY; + use bitcoin::blockdata::transaction::Transaction; + + const BADONION: u16 = 0x8000; + const PERM: u16 = 0x4000; + const NODE: u16 = 0x1000; + const UPDATE: u16 = 0x1000; + + let mut nodes = create_network(3); + let channels = [create_announced_chan_between_nodes(&nodes, 0, 1), create_announced_chan_between_nodes(&nodes, 1, 2)]; + let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]); + let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(); + // + run_onion_failure_test("invalid_realm", 0, &nodes, &channels, &route, &payment_hash, |msg| { + let session_priv = SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[3; 32]).unwrap(); + let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1; + let onion_keys = ChannelManager::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap(); + let (onion_payloads, _htlc_msat, _htlc_cltv) = ChannelManager::build_onion_payloads_with_realm(&route, cur_height, 100).unwrap(); + let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash); + msg.onion_routing_packet = onion_packet; + }, |_msg| {}, true, Some(PERM|1), None); + + //TODO temporary_node_failure(NODE|2) + //TODO permanent_node_failure(PERM|NODE|2) + //TODO required_node_feature_missing + run_onion_failure_test("invalid_onion_version", 0, &nodes, &channels, &route, &payment_hash, |msg| { msg.onion_routing_packet.version = 1; }, |_msg| {}, true, Some(BADONION|PERM|4), None); + run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &channels, &route, &payment_hash, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, |_msg| {}, true, Some(BADONION|PERM|5), None); + //TODO 
invalid_onion_key + //TODO temporary_channel_failure(UPDATE|7) + //TODO permanent_channel_failure(PERM|8) + //TODO required_channel_feature_missing(PERM|9) + //TODO unknown_next_peer(PERM|10) + //TODO amount_below_minimum(UPDATE|11) + //TODO fee_insufficient(UPDATE|12) + run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &channels, &route, &payment_hash, |msg| { + // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value + msg.cltv_expiry -= 1; + }, |_msg| {}, true, Some(UPDATE|13), None); + run_onion_failure_test("expiry_too_soon", 0, &nodes, &channels, &route, &payment_hash, |msg| { + let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1; + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]); + }, |_msg| {}, true, Some(UPDATE|14), None); + // TODO: unknown_payment_hash (PERM|15) + // TODO: unknown_payment_amount (PERM|15) + run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| { + let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - HTLC_FAIL_TIMEOUT_BLOCKS + 1; + let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]); + }, true, Some(17), None); + run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| { msg.cltv_expiry += 1; }, true, Some(18), None); + /* TODO this raise 'Invalid commitment tx signature from peer' + run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &channels, &route, &payment_hash, |_msg| {}, |msg| { + // violate amt_to_forward > msg.amount_msat + msg.amount_msat -= 1; + }, true, Some(19), None); + */ + // TODO: channel_disabled (UPDATE|20) + run_onion_failure_test("expiry_too_far", 0, &nodes, &channels, &route, &payment_hash, |msg| { + let session_priv = SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[3; 32]).unwrap(); + let mut route = route.clone(); + let height = 1; + route.hops[1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.hops[0].cltv_expiry_delta + 1; + let onion_keys = ChannelManager::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap(); + let (onion_payloads, _htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, height).unwrap(); + let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash); + msg.cltv_expiry = htlc_cltv; + msg.onion_routing_packet = onion_packet; + }, |_msg| {}, true, Some(21), None); + } } diff --git a/src/ln/channelmonitor.rs b/src/ln/channelmonitor.rs index 8d6d23ad00a..59ed177b38f 100644 --- a/src/ln/channelmonitor.rs +++ b/src/ln/channelmonitor.rs @@ -155,7 +155,13 @@ impl ManyChannelMonitor for SimpleManyChannelMonitor { const CLTV_SHARED_CLAIM_BUFFER: u32 = 12; /// If an HTLC expires within this many blocks, force-close the channel to broadcast the /// HTLC-Success transaction. -const CLTV_CLAIM_BUFFER: u32 = 6; +/// In other words, this is an upper bound on how many blocks we think it can take us to get a +/// transaction confirmed (and we use it in a few more, equivalent, places). 
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6; +/// Number of blocks by which point we expect our counterparty to have seen new blocks on the +/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our +/// copies of ChannelMonitors, including watchtowers). +pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3; #[derive(Clone, PartialEq)] enum KeyStorage { @@ -1184,16 +1190,7 @@ impl ChannelMonitor { } } if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx { - let mut needs_broadcast = false; - for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() { - if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER { - if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) { - needs_broadcast = true; - } - } - } - - if needs_broadcast { + if self.would_broadcast_at_height(height) { broadcaster.broadcast_transaction(&cur_local_tx.tx); for tx in self.broadcast_by_local_state(&cur_local_tx) { broadcaster.broadcast_transaction(&tx); @@ -1206,10 +1203,29 @@ impl ChannelMonitor { pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool { if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx { for &(ref htlc, _, _) in cur_local_tx.htlc_outputs.iter() { - if htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER { - if htlc.offered || self.payment_preimages.contains_key(&htlc.payment_hash) { - return true; - } + // For inbound HTLCs which we know the preimage for, we have to ensure we hit the + // chain with enough room to claim the HTLC without our counterparty being able to + // time out the HTLC first. + // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary + // concern is being able to claim the corresponding inbound HTLC (on another + // channel) before it expires. In fact, we don't even really care if our + // counterparty here claims such an outbound HTLC after it expired as long as we + // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the + // chain when our counterparty is waiting for expiration to off-chain fail an HTLC + // we give ourselves a few blocks of headroom after expiration before going + // on-chain for an expired HTLC. + // Note that, to avoid a potential attack whereby a node delays claiming an HTLC + // from us until we've reached the point where we go on-chain with the + // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at + // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC. + // aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER + // inbound_cltv == height + CLTV_CLAIM_BUFFER + // outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFER <= inbound_cltv - CLTV_CLAIM_BUFFER + // HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= inbound_cltv - outbound_cltv + // HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFER <= CLTV_EXPIRY_DELTA + if ( htlc.offered && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) || + (!htlc.offered && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) { + return true; } } } diff --git a/src/ln/msgs.rs b/src/ln/msgs.rs index 5b3d57aae02..7b0254bddfd 100644 --- a/src/ln/msgs.rs +++ b/src/ln/msgs.rs @@ -485,7 +485,19 @@ pub enum HTLCFailChannelUpdate { ChannelClosed { /// The short_channel_id which has now closed. short_channel_id: u64, + /// when this true, this channel should be permanently removed from the + /// consideration. 
Otherwise, the channel can be restored once a newer channel_update is received
+		is_permanent: bool,
 	},
+	/// We received an error which indicated only that a node has failed
+	NodeFailure {
+		/// The node_id of the node that has failed.
+		node_id: PublicKey,
+		/// When this is true, the node should be permanently removed from consideration.
+		/// Otherwise, the channels connected to this node can be restored once a newer
+		/// channel_update is received.
+		is_permanent: bool,
+	}
 }
 
 /// A trait to describe an object which can receive channel messages.
@@ -517,7 +529,7 @@ pub trait ChannelMessageHandler : events::EventsProvider + Send + Sync {
 	/// Handle an incoming update_fulfill_htlc message from the given peer.
 	fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), HandleError>;
 	/// Handle an incoming update_fail_htlc message from the given peer.
-	fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<Option<HTLCFailChannelUpdate>, HandleError>;
+	fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), HandleError>;
 	/// Handle an incoming update_fail_malformed_htlc message from the given peer.
 	fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), HandleError>;
 	/// Handle an incoming commitment_signed message from the given peer.
diff --git a/src/ln/peer_handler.rs b/src/ln/peer_handler.rs
index 76cc35f64c5..fc68092da7a 100644
--- a/src/ln/peer_handler.rs
+++ b/src/ln/peer_handler.rs
@@ -615,10 +615,7 @@ impl PeerManager {
 					},
 					131 => {
 						let msg = try_potential_decodeerror!(msgs::UpdateFailHTLC::read(&mut reader));
-						let chan_update = try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg));
-						if let Some(update) = chan_update {
-							self.message_handler.route_handler.handle_htlc_fail_channel_update(&update);
-						}
+						try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg));
 					},
 					135 => {
 						let msg = try_potential_decodeerror!(msgs::UpdateFailMalformedHTLC::read(&mut reader));
@@ -906,6 +903,10 @@ impl PeerManager {
 							}
 							continue;
 						},
+						Event::RouteUpdate { ref update } => {
+							self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
+							continue;
+						},
 						Event::HandleError { ref node_id, ref action } => {
 							if let Some(ref action) = *action {
 								match *action {
diff --git a/src/ln/router.rs b/src/ln/router.rs
index 4a55df88c6f..4d256a12ce1 100644
--- a/src/ln/router.rs
+++ b/src/ln/router.rs
@@ -36,6 +36,9 @@ pub struct RouteHop {
 	/// The CLTV delta added for this hop. For the last hop, this should be the full CLTV value
 	/// expected at the destination, in excess of the current block height.
pub cltv_expiry_delta: u32, + /// channel update timestamp of the outbound channel of each hop when this route is + /// calculated + pub channel_update_timestamp: u32, } /// A route from us through the network to a destination @@ -168,6 +171,8 @@ impl NetworkMap { pub struct RouteHint { /// The node_id of the non-target end of the route pub src_node_id: PublicKey, + /// last update + pub last_update: u32, /// The short_channel_id of this channel pub short_channel_id: u64, /// The static msat-denominated fee which must be paid to use this channel @@ -349,12 +354,17 @@ impl RoutingMessageHandler for Router { &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { let _ = self.handle_channel_update(msg); }, - &msgs::HTLCFailChannelUpdate::ChannelClosed { ref short_channel_id } => { + &msgs::HTLCFailChannelUpdate::ChannelClosed { ref short_channel_id, is_permanent:_ } => { let mut network = self.network_map.write().unwrap(); if let Some(chan) = network.channels.remove(short_channel_id) { Self::remove_channel_in_nodes(&mut network.nodes, &chan, *short_channel_id); } }, + &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent:_ } => { + //let mut network = self.network_map.write().unwrap(); + //TODO: check _blamed_upstream_node + self.mark_node_bad(node_id, false); + }, } } @@ -450,6 +460,7 @@ impl cmp::PartialOrd for RouteGraphNode { struct DummyDirectionalChannelInfo { src_node_id: PublicKey, + last_update: u32, cltv_expiry_delta: u32, htlc_minimum_msat: u64, fee_base_msat: u32, @@ -560,6 +571,7 @@ impl Router { let dummy_directional_info = DummyDirectionalChannelInfo { // used for first_hops routes src_node_id: network.our_node_id.clone(), + last_update: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, fee_base_msat: 0, @@ -580,6 +592,7 @@ impl Router { short_channel_id, fee_msat: final_value_msat, cltv_expiry_delta: final_cltv, + channel_update_timestamp: 0, //TODO: related to local channel_update_handling? }], }); } @@ -613,6 +626,7 @@ impl Router { short_channel_id: 0, fee_msat: 0, cltv_expiry_delta: 0, + channel_update_timestamp: $directional_info.last_update, }) }); if $directional_info.src_node_id != network.our_node_id { @@ -639,6 +653,7 @@ impl Router { short_channel_id: $chan_id.clone(), fee_msat: new_fee, // This field is ignored on the last-hop anyway cltv_expiry_delta: $directional_info.cltv_expiry_delta as u32, + channel_update_timestamp: $directional_info.last_update, } } } @@ -1161,6 +1176,7 @@ mod tests { let mut last_hops = vec!(RouteHint { src_node_id: node4.clone(), short_channel_id: 8, + last_update: 0, fee_base_msat: 0, fee_proportional_millionths: 0, cltv_expiry_delta: (8 << 8) | 1, @@ -1168,6 +1184,7 @@ mod tests { }, RouteHint { src_node_id: node5.clone(), short_channel_id: 9, + last_update: 0, fee_base_msat: 1001, fee_proportional_millionths: 0, cltv_expiry_delta: (9 << 8) | 1, @@ -1175,6 +1192,7 @@ mod tests { }, RouteHint { src_node_id: node6.clone(), short_channel_id: 10, + last_update: 0, fee_base_msat: 0, fee_proportional_millionths: 0, cltv_expiry_delta: (10 << 8) | 1, diff --git a/src/util/events.rs b/src/util/events.rs index a317f076658..52ce184a567 100644 --- a/src/util/events.rs +++ b/src/util/events.rs @@ -75,6 +75,10 @@ pub enum Event { PaymentFailed { /// The hash which was given to ChannelManager::send_payment. payment_hash: [u8; 32], + /// Inindicates if the payment can be retried. 
+		retryable: bool,
+		/// The error code from the onion failure message, if one could be decoded
+		error_code: Option<u16>,
 	},
 	/// Used to indicate that ChannelManager::process_pending_htlc_forwards should be called at a
 	/// time in the future.
@@ -161,6 +165,13 @@ pub enum Event {
 		node_id: PublicKey,
 		/// The action which should be taken.
 		action: Option<msgs::ErrorAction>
+	},
+
+	/// Used to indicate that a change should be made to the network map, i.e. passed on to the
+	/// Router.
+	RouteUpdate {
+		/// The channel/node update which should be sent to the Router
+		update: msgs::HTLCFailChannelUpdate,
 	}
 }
diff --git a/src/util/test_utils.rs b/src/util/test_utils.rs
index 49aa269b3b4..f8341e88587 100644
--- a/src/util/test_utils.rs
+++ b/src/util/test_utils.rs
@@ -110,7 +110,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 	fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
 		Err(HandleError { err: "", action: None })
 	}
-	fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+	fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
 		Err(HandleError { err: "", action: None })
 	}
 	fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
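
For reference, the origin-node error handling added in process_onion_failure above boils down to repeatedly stripping one hop's obfuscation layer from the returned error packet and checking whether that hop's HMAC verifies; the first hop whose HMAC verifies is the hop that authored the error, and its two-byte failure code is what ends up in the retryable/error_code fields of Event::PaymentFailed. A minimal sketch of a single peel step is below. It assumes the ammag and um keys are derived with the existing gen_ammag_from_shared_secret/gen_um_from_shared_secret helpers, uses the same rust-crypto primitives already used in this patch, and follows the BOLT 4 error-packet layout; peel_onion_error itself is a hypothetical helper, not part of the patch.

	extern crate crypto;

	use crypto::chacha20::ChaCha20;
	use crypto::hmac::Hmac;
	use crypto::mac::Mac;
	use crypto::sha2::Sha256;
	use crypto::symmetriccipher::SynchronousStreamCipher;

	/// Sketch: strips one hop's obfuscation layer from an onion error packet and, if that hop's
	/// HMAC verifies, returns the two-byte BOLT 4 failure code it reported.
	fn peel_onion_error(packet: &mut Vec<u8>, ammag: &[u8; 32], um: &[u8; 32]) -> Option<u16> {
		// De-obfuscate: each hop XORs the packet with a ChaCha20 stream keyed by its ammag key.
		let mut clear = vec![0u8; packet.len()];
		let mut chacha = ChaCha20::new(ammag, &[0u8; 8]);
		chacha.process(&packet[..], &mut clear[..]);
		*packet = clear;

		// Wire layout: 32-byte hmac || u16 failure_len || failuremsg || u16 pad_len || pad
		if packet.len() < 32 + 2 + 2 {
			return None;
		}
		let mut hmac = Hmac::new(Sha256::new(), um);
		hmac.input(&packet[32..]);
		let mut calc_tag = [0u8; 32];
		hmac.raw_result(&mut calc_tag);
		if !crypto::util::fixed_time_eq(&calc_tag, &packet[..32]) {
			// The HMAC doesn't verify with this hop's um key: a later hop authored the error,
			// so the caller should keep peeling with the next hop's keys.
			return None;
		}

		let failure_len = ((packet[32] as usize) << 8) | packet[33] as usize;
		if failure_len < 2 || packet.len() < 34 + failure_len {
			return None;
		}
		// The first two bytes of failuremsg are the big-endian failure code.
		Some(((packet[34] as u16) << 8) | packet[35] as u16)
	}

In process_onion_failure the same peel-and-verify step is driven by construct_onion_keys_callback, so the per-hop shared secrets (and therefore the ammag/um keys) come out in route order and the loop stops at the first hop whose tag matches.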