-
Notifications
You must be signed in to change notification settings - Fork 407
TLV-based Variable Length Onion #434
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
87a0018
36c725f
c2a47d1
8f37503
f990aac
66c4ed2
85c8410
c326061
c94e53d
bfe59a7
65a2bcf
bec0a26
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -38,21 +38,19 @@ use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys}; | |
use util::config::UserConfig; | ||
use util::{byte_utils, events}; | ||
use util::ser::{Readable, ReadableArgs, Writeable, Writer}; | ||
use util::chacha20::ChaCha20; | ||
use util::chacha20::{ChaCha20, ChaChaReader}; | ||
use util::logger::Logger; | ||
use util::errors::APIError; | ||
|
||
use std::{cmp, mem}; | ||
use std::collections::{HashMap, hash_map, HashSet}; | ||
use std::io::Cursor; | ||
use std::io::{Cursor, Read}; | ||
use std::sync::{Arc, Mutex, MutexGuard, RwLock}; | ||
use std::sync::atomic::{AtomicUsize, Ordering}; | ||
use std::time::Duration; | ||
use std::marker::{Sync, Send}; | ||
use std::ops::Deref; | ||
|
||
const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65]; | ||
|
||
// We hold various information about HTLC relay in the HTLC objects in Channel itself: | ||
// | ||
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should | ||
|
@@ -906,22 +904,30 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. side-note: to save some hash+ECDH operations, the version check should happen first. It's not going to protect against intentional DoSy behavior but if a buggy node keep sending us non-supported packet due to misinterpreting our features signaling that's better. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. No it absolutely should not. That would expose a timing oracle as to whether or not the first byte of the ciphertext was X. It probably wouldn't explicitly break anything, but that's how we end up with Bleichenbacher attacks. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. But the version byte isn't part of the ciphertext ? Bleichenbacher lies on the fact that's the padding is part of the ciphertext which isn't the case here. I mention this at first because the order is described in the spec : "Upon receiving a packet, a processing node compares the version byte of the packet with its own supported versions and aborts the connection if the packet specifies a version number that it doesn't support" (packet forwarding) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ah, I misunderstood what you were suggesting - the comment was on a "}" so I have no context. Indeed, we already check the version before checking the HMAC (and return a differentiating error message anyway), so moving it forward should be fine, but also an existing issue that isnt changed here. There was a problem hiding this comment. 
Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah sorry Github sucks for commenting something PR-related but not directly on changes themselves. We can do this later, that's why I mentioned it as side-note. |
||
|
||
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]); | ||
let next_hop_data = { | ||
let mut decoded = [0; 65]; | ||
chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded); | ||
match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) { | ||
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) }; | ||
let (next_hop_data, next_hop_hmac) = { | ||
match msgs::OnionHopData::read(&mut chacha_stream) { | ||
Err(err) => { | ||
let error_code = match err { | ||
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte | ||
msgs::DecodeError::UnknownRequiredFeature| | ||
msgs::DecodeError::InvalidValue| | ||
msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload | ||
_ => 0x2000 | 2, // Should never happen | ||
}; | ||
return_err!("Unable to decode our hop data", error_code, &[0;0]); | ||
}, | ||
Ok(msg) => msg | ||
Ok(msg) => { | ||
let mut hmac = [0; 32]; | ||
if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) { | ||
return_err!("Unable to decode hop data", 0x4000 | 22, &[0;0]); | ||
} | ||
(msg, hmac) | ||
}, | ||
} | ||
}; | ||
|
||
let pending_forward_info = if next_hop_data.hmac == [0; 32] { | ||
let pending_forward_info = if next_hop_hmac == [0; 32] { | ||
#[cfg(test)] | ||
{ | ||
// In tests, make sure that the initial onion packet data is, at least, non-0. | ||
|
@@ -931,10 +937,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
// as-is (and were originally 0s). | ||
// Of course reverse path calculation is still pretty easy given naive routing | ||
// algorithms, but this fixes the most-obvious case. | ||
let mut new_packet_data = [0; 19*65]; | ||
chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]); | ||
assert_ne!(new_packet_data[0..65], [0; 65][..]); | ||
assert_ne!(new_packet_data[..], [0; 19*65][..]); | ||
let mut next_bytes = [0; 32]; | ||
chacha_stream.read_exact(&mut next_bytes).unwrap(); | ||
assert_ne!(next_bytes[..], [0; 32][..]); | ||
chacha_stream.read_exact(&mut next_bytes).unwrap(); | ||
assert_ne!(next_bytes[..], [0; 32][..]); | ||
} | ||
|
||
// OUR PAYMENT! | ||
|
@@ -943,11 +950,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); | ||
} | ||
// final_incorrect_htlc_amount | ||
if next_hop_data.data.amt_to_forward > msg.amount_msat { | ||
if next_hop_data.amt_to_forward > msg.amount_msat { | ||
return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); | ||
} | ||
// final_incorrect_cltv_expiry | ||
if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry { | ||
if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { | ||
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); | ||
} | ||
|
||
|
@@ -961,13 +968,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
payment_hash: msg.payment_hash.clone(), | ||
short_channel_id: 0, | ||
incoming_shared_secret: shared_secret, | ||
amt_to_forward: next_hop_data.data.amt_to_forward, | ||
outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, | ||
amt_to_forward: next_hop_data.amt_to_forward, | ||
outgoing_cltv_value: next_hop_data.outgoing_cltv_value, | ||
}) | ||
} else { | ||
let mut new_packet_data = [0; 20*65]; | ||
chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]); | ||
chacha.process(&SIXTY_FIVE_ZEROS[..], &mut new_packet_data[19*65..]); | ||
let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Onion forward processing is a bit tricky, could be better documented. Like "Read leftover of incoming hops_data to 1300 0x00 bytes. Then XOR rho-key pseudo-random stream (1300 - hop_data length)-bytes length to new buffer". There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hmm, I think I added comments in response to Jeff's comments while you were posting this. Are you happy with it as it is now? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can refactor it later, I think There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @arik-so What could be refactored here? |
||
#[cfg(debug_assertions)] | ||
{ | ||
// Check two things: | ||
// a) that the behavior of our stream here will return Ok(0) even if the TLV | ||
// read above emptied out our buffer and the unwrap() won't needlessly panic | ||
// b) that we didn't somehow magically end up with extra data. | ||
let mut t = [0; 1]; | ||
debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); | ||
} | ||
// Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we | ||
// fill the onion hop data we'll forward to our next-hop peer. | ||
chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); | ||
|
||
let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); | ||
|
||
|
@@ -986,16 +1004,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
version: 0, | ||
public_key, | ||
hop_data: new_packet_data, | ||
hmac: next_hop_data.hmac.clone(), | ||
hmac: next_hop_hmac.clone(), | ||
}; | ||
|
||
let short_channel_id = match next_hop_data.format { | ||
msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, | ||
msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, | ||
msgs::OnionHopDataFormat::FinalNode => { | ||
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Following BOLT4, "the erring node may include that type and its byte offset in the decrypted byte stream." so here we may return type 6 and offset of short_channel_id (but is this making sense because now you have to save offset for every type at deserialization in case you fail validation ?) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Type 6? The spec indicates that we could store where in the TLV stream we failed to read, and tell our peers that (I presume as a way to help debug errors, since the data isn't machine-respondable-to anyway). I'm super not gonna bother doing that. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I agree don't bother just comment maybe we don't strictly adhere to the spec recommendation (which seems really loose in this case) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Its a recommendation, though, its MAY...we're still strictly adhering to the spec....I dont think we call it out in other places. |
||
}, | ||
}; | ||
|
||
PendingHTLCStatus::Forward(PendingForwardHTLCInfo { | ||
onion_packet: Some(outgoing_packet), | ||
payment_hash: msg.payment_hash.clone(), | ||
short_channel_id: next_hop_data.data.short_channel_id, | ||
short_channel_id: short_channel_id, | ||
incoming_shared_secret: shared_secret, | ||
amt_to_forward: next_hop_data.data.amt_to_forward, | ||
outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, | ||
amt_to_forward: next_hop_data.amt_to_forward, | ||
outgoing_cltv_value: next_hop_data.outgoing_cltv_value, | ||
}) | ||
}; | ||
|
||
|
@@ -1137,6 +1163,9 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T | |
let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv), | ||
APIError::RouteError{err: "Pubkey along hop was maliciously selected"}); | ||
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?; | ||
if onion_utils::route_size_insane(&onion_payloads) { | ||
return Err(APIError::RouteError{err: "Route size too large considering onion data"}); | ||
} | ||
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash); | ||
|
||
let _ = self.total_consistency_lock.read().unwrap(); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -29,6 +29,10 @@ mod sealed { // You should just use the type aliases instead. | |
pub trait UpfrontShutdownScript: Context {} | ||
impl UpfrontShutdownScript for InitContext {} | ||
impl UpfrontShutdownScript for NodeContext {} | ||
|
||
pub trait VariableLengthOnion: Context {} | ||
impl VariableLengthOnion for InitContext {} | ||
impl VariableLengthOnion for NodeContext {} | ||
} | ||
|
||
/// Tracks the set of features which a node implements, templated by the context in which it | ||
|
@@ -69,7 +73,7 @@ impl InitFeatures { | |
/// Create a Features with the features we support | ||
pub fn supported() -> InitFeatures { | ||
InitFeatures { | ||
flags: vec![2 | 1 << 5], | ||
flags: vec![2 | 1 << 5, 1 << (9-8)], | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we may wanna make these statements more reader-friendly at some point There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Maybe lets fix that in #440 or so. We're not making it worse here, and a followup would be good. |
||
mark: PhantomData, | ||
} | ||
} | ||
|
@@ -132,14 +136,14 @@ impl NodeFeatures { | |
#[cfg(not(feature = "fuzztarget"))] | ||
pub(crate) fn supported() -> NodeFeatures { | ||
NodeFeatures { | ||
flags: vec![2 | 1 << 5], | ||
flags: vec![2 | 1 << 5, 1 << (9-8)], | ||
mark: PhantomData, | ||
} | ||
} | ||
#[cfg(feature = "fuzztarget")] | ||
pub fn supported() -> NodeFeatures { | ||
NodeFeatures { | ||
flags: vec![2 | 1 << 5], | ||
flags: vec![2 | 1 << 5, 1 << (9-8)], | ||
mark: PhantomData, | ||
} | ||
} | ||
|
@@ -182,13 +186,21 @@ impl<T: sealed::Context> Features<T> { | |
|
||
pub(crate) fn requires_unknown_bits(&self) -> bool { | ||
self.flags.iter().enumerate().any(|(idx, &byte)| { | ||
( idx != 0 && (byte & 0x55) != 0 ) || ( idx == 0 && (byte & 0x14) != 0 ) | ||
(match idx { | ||
0 => (byte & 0b00010100), | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Shouldn't the bit pattern be different here? [NOTE: an inline code snippet appears to have been lost from this comment.] Anyway, add comments like "(00) data_loss_protect - (01) initial_routing_sync - (00) upfront_shutdown_script ..." There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I believe this (and other issues) are fixed in #440. |
||
1 => (byte & 0b01010100), | ||
_ => (byte & 0b01010101), | ||
}) != 0 | ||
}) | ||
} | ||
|
||
pub(crate) fn supports_unknown_bits(&self) -> bool { | ||
self.flags.iter().enumerate().any(|(idx, &byte)| { | ||
( idx != 0 && byte != 0 ) || ( idx == 0 && (byte & 0xc4) != 0 ) | ||
(match idx { | ||
0 => (byte & 0b11000100), | ||
1 => (byte & 0b11111100), | ||
_ => byte, | ||
}) != 0 | ||
}) | ||
} | ||
|
||
|
@@ -232,6 +244,12 @@ impl<T: sealed::UpfrontShutdownScript> Features<T> { | |
} | ||
} | ||
|
||
impl<T: sealed::VariableLengthOnion> Features<T> { | ||
pub(crate) fn supports_variable_length_onion(&self) -> bool { | ||
self.flags.len() > 1 && (self.flags[1] & 3) != 0 | ||
} | ||
} | ||
|
||
impl<T: sealed::InitialRoutingSync> Features<T> { | ||
pub(crate) fn initial_routing_sync(&self) -> bool { | ||
self.flags.len() > 0 && (self.flags[0] & (1 << 3)) != 0 | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -18,7 +18,7 @@ use util::enforcing_trait_impls::EnforcingChannelKeys; | |
use util::test_utils; | ||
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; | ||
use util::errors::APIError; | ||
use util::ser::{Writeable, ReadableArgs}; | ||
use util::ser::{Writeable, Writer, ReadableArgs}; | ||
use util::config::UserConfig; | ||
use util::logger::Logger; | ||
|
||
|
@@ -44,7 +44,7 @@ use std::collections::{BTreeSet, HashMap, HashSet}; | |
use std::default::Default; | ||
use std::sync::{Arc, Mutex}; | ||
use std::sync::atomic::Ordering; | ||
use std::mem; | ||
use std::{mem, io}; | ||
|
||
use rand::{thread_rng, Rng}; | ||
|
||
|
@@ -5007,6 +5007,20 @@ impl msgs::ChannelUpdate { | |
} | ||
} | ||
|
||
struct BogusOnionHopData { | ||
data: Vec<u8> | ||
} | ||
impl BogusOnionHopData { | ||
fn new(orig: msgs::OnionHopData) -> Self { | ||
Self { data: orig.encode() } | ||
} | ||
} | ||
impl Writeable for BogusOnionHopData { | ||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { | ||
writer.write_all(&self.data[..]) | ||
} | ||
} | ||
|
||
#[test] | ||
fn test_onion_failure() { | ||
use ln::msgs::ChannelUpdate; | ||
|
@@ -5036,19 +5050,31 @@ fn test_onion_failure() { | |
let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1; | ||
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap(); | ||
let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap(); | ||
onion_payloads[0].realm = 3; | ||
msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); | ||
}, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here | ||
let mut new_payloads = Vec::new(); | ||
for payload in onion_payloads.drain(..) { | ||
new_payloads.push(BogusOnionHopData::new(payload)); | ||
} | ||
// break the first (non-final) hop payload by swapping the realm (0) byte for a byte | ||
// describing a length-1 TLV payload, which is obviously bogus. | ||
new_payloads[0].data[0] = 1; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The significance of this is unclear to me. Is this the version byte? Can we use a constant for it? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Its just to invalidate it, added a comment to note what it does. |
||
msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash); | ||
}, ||{}, true, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here | ||
|
||
// final node failure | ||
run_onion_failure_test("invalid_realm", 3, &nodes, &route, &payment_hash, |msg| { | ||
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); | ||
let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1; | ||
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap(); | ||
let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap(); | ||
onion_payloads[1].realm = 3; | ||
msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash); | ||
}, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true})); | ||
let mut new_payloads = Vec::new(); | ||
for payload in onion_payloads.drain(..) { | ||
new_payloads.push(BogusOnionHopData::new(payload)); | ||
} | ||
// break the last-hop payload by swapping the realm (0) byte for a byte describing a | ||
// length-1 TLV payload, which is obviously bogus. | ||
new_payloads[1].data[0] = 1; | ||
msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash); | ||
}, ||{}, false, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true})); | ||
|
||
// the following three with run_onion_failure_test_with_fail_intercept() test only the origin node | ||
// receiving simulated fail messages | ||
|
Uh oh!
There was an error while loading. Please reload this page.