
TLV-based Variable Length Onion #434


Merged: 12 commits, Feb 11, 2020
2 changes: 1 addition & 1 deletion fuzz/src/msg_targets/gen_target.sh
@@ -34,8 +34,8 @@ GEN_TEST NodeAnnouncement test_msg_exact ""

GEN_TEST UpdateAddHTLC test_msg_hole ", 85, 33"
GEN_TEST ErrorMessage test_msg_hole ", 32, 2"
GEN_TEST OnionHopData test_msg_hole ", 1+8+8+4, 12"

GEN_TEST Init test_msg_simple ""
GEN_TEST OnionHopData test_msg_simple ""
GEN_TEST Ping test_msg_simple ""
GEN_TEST Pong test_msg_simple ""
2 changes: 1 addition & 1 deletion fuzz/src/msg_targets/mod.rs
@@ -20,7 +20,7 @@ pub mod msg_channel_update;
pub mod msg_node_announcement;
pub mod msg_update_add_htlc;
pub mod msg_error_message;
pub mod msg_onion_hop_data;
pub mod msg_init;
pub mod msg_onion_hop_data;
pub mod msg_ping;
pub mod msg_pong;
2 changes: 1 addition & 1 deletion fuzz/src/msg_targets/msg_onion_hop_data.rs
@@ -7,7 +7,7 @@ use msg_targets::utils::VecWriter;

#[inline]
pub fn do_test(data: &[u8]) {
test_msg_hole!(msgs::OnionHopData, data, 1+8+8+4, 12);
test_msg_simple!(msgs::OnionHopData, data);
}

#[no_mangle]
21 changes: 20 additions & 1 deletion fuzz/src/msg_targets/utils.rs
@@ -13,6 +13,15 @@ impl Writer for VecWriter {
}
}

// We attempt to test the strictest behavior we can for a given message; however, some messages
// have different expected behavior. You can see which messages have which behavior in
// gen_target.sh but, in general, the *_announcement messages have to round-trip exactly (as
// otherwise we'd invalidate the signatures), most messages just need to round-trip up to the
// amount of data we know how to interpret, and for some messages we may throw out invalid stuff
// (eg if an error message isn't valid UTF-8 we can't String-ize it), so they won't round-trip
// correctly.

// Tests a message that must survive roundtrip exactly, though may not empty the read buffer
// entirely
#[macro_export]
macro_rules! test_msg {
($MsgType: path, $data: ident) => {
@@ -31,6 +40,8 @@ macro_rules! test_msg {
}
}

// Tests a message that may lose data on roundtrip, but shouldn't lose data compared to our
// re-serialization.
#[macro_export]
macro_rules! test_msg_simple {
($MsgType: path, $data: ident) => {
@@ -40,11 +51,18 @@ macro_rules! test_msg_simple {
if let Ok(msg) = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut r) {
let mut w = VecWriter(Vec::new());
msg.write(&mut w).unwrap();

let msg = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
let mut w_two = VecWriter(Vec::new());
msg.write(&mut w_two).unwrap();
assert_eq!(&w.0[..], &w_two.0[..]);
}
}
}
}

// Tests a message that must survive roundtrip exactly, and must exactly empty the read buffer and
// split it back out on re-serialization.
#[macro_export]
macro_rules! test_msg_exact {
($MsgType: path, $data: ident) => {
@@ -54,13 +72,14 @@ macro_rules! test_msg_exact {
if let Ok(msg) = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut r) {
let mut w = VecWriter(Vec::new());
msg.write(&mut w).unwrap();

assert_eq!(&r.into_inner()[..], &w.0[..]);
}
}
}
}

// Tests a message that must survive roundtrip exactly, modulo one "hole" which may be set to 0s on
// re-serialization.
#[macro_export]
macro_rules! test_msg_hole {
($MsgType: path, $data: ident, $hole: expr, $hole_len: expr) => {
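The body of test_msg_hole is collapsed in this view. As a rough sketch only (the assertions below are assumed from the doc comment above, not copied from the repository), it re-serializes the parsed message and requires byte-for-byte equality with the input except inside the declared hole, which may come back zeroed:

// Sketch of test_msg_hole, assuming the behavior described by its doc comment; the real
// (collapsed) macro body may differ in details.
#[macro_export]
macro_rules! test_msg_hole {
    ($MsgType: path, $data: ident, $hole: expr, $hole_len: expr) => {
        {
            let mut r = ::std::io::Cursor::new($data);
            if let Ok(msg) = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut r) {
                let mut w = VecWriter(Vec::new());
                msg.write(&mut w).unwrap();
                // Bytes before the hole must round-trip exactly...
                assert_eq!(&$data[..$hole], &w.0[..$hole]);
                // ...and so must everything after it, up to the re-serialized length.
                assert_eq!(&$data[$hole + $hole_len..w.0.len()], &w.0[$hole + $hole_len..]);
            }
        }
    }
}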
77 changes: 53 additions & 24 deletions lightning/src/ln/channelmanager.rs
@@ -38,21 +38,19 @@ use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys};
use util::config::UserConfig;
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, Writeable, Writer};
use util::chacha20::ChaCha20;
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;

use std::{cmp, mem};
use std::collections::{HashMap, hash_map, HashSet};
use std::io::Cursor;
use std::io::{Cursor, Read};
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use std::marker::{Sync, Send};
use std::ops::Deref;

const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65];

// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
@@ -906,22 +904,30 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
}
Reviewer:

Side note: to save some hash+ECDH operations, the version check should happen first. It's not going to protect against intentionally DoSy behavior, but if a buggy node keeps sending us unsupported packets due to misinterpreting our feature signaling, that's better.

Collaborator Author:

No, it absolutely should not. That would expose a timing oracle as to whether or not the first byte of the ciphertext was X. It probably wouldn't explicitly break anything, but that's how we end up with Bleichenbacher attacks.

Reviewer:

But the version byte isn't part of the ciphertext? Bleichenbacher relies on the padding being part of the ciphertext, which isn't the case here.

I mentioned this because the order is described in the spec: "Upon receiving a packet, a processing node compares the version byte of the packet with its own supported versions and aborts the connection if the packet specifies a version number that it doesn't support" (packet forwarding).

Collaborator Author:

Ah, I misunderstood what you were suggesting - the comment was on a "}" so I had no context. Indeed, we already check the version before checking the HMAC (and return a differentiating error message anyway), so moving it forward should be fine, but that's also an existing issue that isn't changed here.

Reviewer:

Yeah, sorry, GitHub is awkward for commenting on something PR-related that isn't directly on the changes themselves. We can do this later; that's why I mentioned it as a side note.


let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
let next_hop_data = {
let mut decoded = [0; 65];
chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
let (next_hop_data, next_hop_hmac) = {
match msgs::OnionHopData::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
msgs::DecodeError::UnknownRequiredFeature|
msgs::DecodeError::InvalidValue|
msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload
_ => 0x2000 | 2, // Should never happen
};
return_err!("Unable to decode our hop data", error_code, &[0;0]);
},
Ok(msg) => msg
Ok(msg) => {
let mut hmac = [0; 32];
if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) {
return_err!("Unable to decode hop data", 0x4000 | 22, &[0;0]);
}
(msg, hmac)
},
}
};

let pending_forward_info = if next_hop_data.hmac == [0; 32] {
let pending_forward_info = if next_hop_hmac == [0; 32] {
#[cfg(test)]
{
// In tests, make sure that the initial onion packet data is, at least, non-0.
@@ -931,10 +937,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
// as-is (and were originally 0s).
// Of course reverse path calculation is still pretty easy given naive routing
// algorithms, but this fixes the most-obvious case.
let mut new_packet_data = [0; 19*65];
chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
assert_ne!(new_packet_data[0..65], [0; 65][..]);
assert_ne!(new_packet_data[..], [0; 19*65][..]);
let mut next_bytes = [0; 32];
chacha_stream.read_exact(&mut next_bytes).unwrap();
assert_ne!(next_bytes[..], [0; 32][..]);
chacha_stream.read_exact(&mut next_bytes).unwrap();
assert_ne!(next_bytes[..], [0; 32][..]);
}

// OUR PAYMENT!
@@ -943,11 +950,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
if next_hop_data.data.amt_to_forward > msg.amount_msat {
if next_hop_data.amt_to_forward > msg.amount_msat {
return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
}
// final_incorrect_cltv_expiry
if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
}

@@ -961,13 +968,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
payment_hash: msg.payment_hash.clone(),
short_channel_id: 0,
incoming_shared_secret: shared_secret,
amt_to_forward: next_hop_data.data.amt_to_forward,
outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
amt_to_forward: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
} else {
let mut new_packet_data = [0; 20*65];
chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
chacha.process(&SIXTY_FIVE_ZEROS[..], &mut new_packet_data[19*65..]);
let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
Reviewer:

Onion forward processing is a bit tricky and could be better documented.

Something like: "Read the leftover of the incoming hop_data into 1300 0x00 bytes, then XOR the rho-key pseudo-random stream over the remaining (1300 - hop_data length) bytes of the new buffer".

Collaborator Author:

Hmm, I think I added comments in response to Jeff's comments while you were posting this. Are you happy with it as it is now?

Contributor:

We can refactor it later, I think

Contributor:

@arik-so What could be refactored here?

#[cfg(debug_assertions)]
{
// Check two things:
// a) that the behavior of our stream here will return Ok(0) even if the TLV
// read above emptied out our buffer and the unwrap() won't needlessly panic
// b) that we didn't somehow magically end up with extra data.
let mut t = [0; 1];
debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
}
// Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
// fill the onion hop data we'll forward to our next-hop peer.
chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);

let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();

@@ -986,16 +1004,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
version: 0,
public_key,
hop_data: new_packet_data,
hmac: next_hop_data.hmac.clone(),
hmac: next_hop_hmac.clone(),
};

let short_channel_id = match next_hop_data.format {
msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::FinalNode => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
Reviewer:

a3f101d

Following BOLT 4, "the erring node may include that type and its byte offset in the decrypted byte stream", so here we may return type 6 and the offset of short_channel_id (but does this make sense, given that you would then have to save the offset of every type during deserialization in case validation fails?)

Collaborator Author:

Type 6? The spec indicates that we could store where in the TLV stream we failed to read, and tell our peers that (I presume as a way to help debug errors, since the data isn't machine-respondable-to anyway). I'm super not gonna bother doing that.

Reviewer:

I agree, don't bother; maybe just add a comment that we don't strictly adhere to the spec recommendation (which seems really loose in this case).

Collaborator Author:

It's a recommendation, though, it's a MAY... we're still strictly adhering to the spec. I don't think we call it out in other places.

},
};

PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
onion_packet: Some(outgoing_packet),
payment_hash: msg.payment_hash.clone(),
short_channel_id: next_hop_data.data.short_channel_id,
short_channel_id: short_channel_id,
incoming_shared_secret: shared_secret,
amt_to_forward: next_hop_data.data.amt_to_forward,
outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
amt_to_forward: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
};

@@ -1137,6 +1163,9 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
if onion_utils::route_size_insane(&onion_payloads) {
return Err(APIError::RouteError{err: "Route size too large considering onion data"});
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);

let _ = self.total_consistency_lock.read().unwrap();
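A note for readers following the channelmanager.rs changes above: ChaChaReader is imported from util::chacha20 but defined outside this diff. A minimal sketch of such an adapter, assuming a ChaCha20 type with a process_in_place keystream method (the stub below stands in for the real one), would look roughly like this; the actual type in util/chacha20.rs may differ:

use std::io::{self, Read};

// Stub standing in for util::chacha20::ChaCha20; the real type keeps keystream state and
// XORs it over the buffer in process_in_place.
struct ChaCha20;
impl ChaCha20 {
    fn process_in_place(&mut self, _buf: &mut [u8]) { /* XOR keystream over _buf */ }
}

// Read adapter that decrypts a ChaCha20 stream on the fly, matching the usage above
// (chacha_stream.chacha, chacha_stream.read). Illustrative only.
struct ChaChaReader<'a, R: Read> {
    chacha: &'a mut ChaCha20,
    read: R,
}

impl<'a, R: Read> Read for ChaChaReader<'a, R> {
    fn read(&mut self, dest: &mut [u8]) -> io::Result<usize> {
        // Pull ciphertext from the underlying reader, then XOR the keystream over exactly
        // the bytes read, leaving plaintext in dest.
        let len = self.read.read(dest)?;
        if len > 0 {
            self.chacha.process_in_place(&mut dest[..len]);
        }
        Ok(len)
    }
}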
28 changes: 23 additions & 5 deletions lightning/src/ln/features.rs
@@ -29,6 +29,10 @@ mod sealed { // You should just use the type aliases instead.
pub trait UpfrontShutdownScript: Context {}
impl UpfrontShutdownScript for InitContext {}
impl UpfrontShutdownScript for NodeContext {}

pub trait VariableLengthOnion: Context {}
impl VariableLengthOnion for InitContext {}
impl VariableLengthOnion for NodeContext {}
}

/// Tracks the set of features which a node implements, templated by the context in which it
@@ -69,7 +73,7 @@ impl InitFeatures {
/// Create a Features with the features we support
pub fn supported() -> InitFeatures {
InitFeatures {
flags: vec![2 | 1 << 5],
flags: vec![2 | 1 << 5, 1 << (9-8)],
Contributor:

we may wanna make these statements more reader-friendly at some point

Collaborator Author:

Maybe let's fix that in #440 or so. We're not making it worse here, and a followup would be good.

mark: PhantomData,
}
}
@@ -132,14 +136,14 @@ impl NodeFeatures {
#[cfg(not(feature = "fuzztarget"))]
pub(crate) fn supported() -> NodeFeatures {
NodeFeatures {
flags: vec![2 | 1 << 5],
flags: vec![2 | 1 << 5, 1 << (9-8)],
mark: PhantomData,
}
}
#[cfg(feature = "fuzztarget")]
pub fn supported() -> NodeFeatures {
NodeFeatures {
flags: vec![2 | 1 << 5],
flags: vec![2 | 1 << 5, 1 << (9-8)],
mark: PhantomData,
}
}
@@ -182,13 +186,21 @@ impl<T: sealed::Context> Features<T> {

pub(crate) fn requires_unknown_bits(&self) -> bool {
self.flags.iter().enumerate().any(|(idx, &byte)| {
( idx != 0 && (byte & 0x55) != 0 ) || ( idx == 0 && (byte & 0x14) != 0 )
(match idx {
0 => (byte & 0b00010100),
Reviewer:

53a3c62

Shouldn't the bit pattern be 0b01000100? We support option_upfront_shutdown_script but not gossip_queries?

Anyway, add comments like "(00) data_loss_protect - (01) initial_routing_sync - (00) upfront_shutdown_script ..."

Collaborator Author:

I believe this (and other issues) are fixed in #440.

1 => (byte & 0b01010100),
_ => (byte & 0b01010101),
}) != 0
})
}

pub(crate) fn supports_unknown_bits(&self) -> bool {
self.flags.iter().enumerate().any(|(idx, &byte)| {
( idx != 0 && byte != 0 ) || ( idx == 0 && (byte & 0xc4) != 0 )
(match idx {
0 => (byte & 0b11000100),
1 => (byte & 0b11111100),
_ => byte,
}) != 0
})
}

@@ -232,6 +244,12 @@ impl<T: sealed::UpfrontShutdownScript> Features<T> {
}
}

impl<T: sealed::VariableLengthOnion> Features<T> {
pub(crate) fn supports_variable_length_onion(&self) -> bool {
self.flags.len() > 1 && (self.flags[1] & 3) != 0
}
}

impl<T: sealed::InitialRoutingSync> Features<T> {
pub(crate) fn initial_routing_sync(&self) -> bool {
self.flags.len() > 0 && (self.flags[0] & (1 << 3)) != 0
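As an aside on the flag arithmetic above (vec![2 | 1 << 5, 1 << (9-8)] and flags[1] & 3): byte N of flags holds feature bits N*8 through N*8+7, so BOLT 9 bit 9 (var_onion_optin, optional) lands in flags[1] as 1 << (9-8), and flags[1] & 3 covers both the required (8) and optional (9) bits. The helper below is purely illustrative and not part of the PR; the function name is made up for this example:

// Illustrative only: maps a BOLT 9 feature-bit number onto the byte/bit layout used by
// this Features representation.
fn feature_bit_set(flags: &[u8], bit: usize) -> bool {
    let byte = bit / 8;
    flags.len() > byte && (flags[byte] & (1u8 << (bit % 8))) != 0
}

fn main() {
    // Same value as InitFeatures::supported() above.
    let flags: Vec<u8> = vec![2 | 1 << 5, 1 << (9 - 8)];
    assert!(feature_bit_set(&flags, 1));  // data_loss_protect (optional)
    assert!(feature_bit_set(&flags, 5));  // upfront_shutdown_script (optional)
    assert!(feature_bit_set(&flags, 9));  // var_onion_optin (optional)
    assert!(!feature_bit_set(&flags, 8)); // var_onion_optin (required) is not advertised
}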
42 changes: 34 additions & 8 deletions lightning/src/ln/functional_tests.rs
@@ -18,7 +18,7 @@ use util::enforcing_trait_impls::EnforcingChannelKeys;
use util::test_utils;
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::ser::{Writeable, Writer, ReadableArgs};
use util::config::UserConfig;
use util::logger::Logger;

@@ -44,7 +44,7 @@ use std::collections::{BTreeSet, HashMap, HashSet};
use std::default::Default;
use std::sync::{Arc, Mutex};
use std::sync::atomic::Ordering;
use std::mem;
use std::{mem, io};

use rand::{thread_rng, Rng};

@@ -5007,6 +5007,20 @@ impl msgs::ChannelUpdate {
}
}

struct BogusOnionHopData {
data: Vec<u8>
}
impl BogusOnionHopData {
fn new(orig: msgs::OnionHopData) -> Self {
Self { data: orig.encode() }
}
}
impl Writeable for BogusOnionHopData {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&self.data[..])
}
}

#[test]
fn test_onion_failure() {
use ln::msgs::ChannelUpdate;
@@ -5036,19 +5050,31 @@ fn test_onion_failure() {
let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
onion_payloads[0].realm = 3;
msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
let mut new_payloads = Vec::new();
for payload in onion_payloads.drain(..) {
new_payloads.push(BogusOnionHopData::new(payload));
}
// break the first (non-final) hop payload by swapping the realm (0) byte for a byte
// describing a length-1 TLV payload, which is obviously bogus.
new_payloads[0].data[0] = 1;
Contributor:

The significance of this is unclear to me. Is this the version byte? Can we use a constant for it?

Collaborator Author:

It's just to invalidate it; added a comment to note what it does.

msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, true, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here

// final node failure
run_onion_failure_test("invalid_realm", 3, &nodes, &route, &payment_hash, |msg| {
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
onion_payloads[1].realm = 3;
msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
let mut new_payloads = Vec::new();
for payload in onion_payloads.drain(..) {
new_payloads.push(BogusOnionHopData::new(payload));
}
// break the last-hop payload by swapping the realm (0) byte for a byte describing a
// length-1 TLV payload, which is obviously bogus.
new_payloads[1].data[0] = 1;
msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
}, ||{}, false, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));

// the following three with run_onion_failure_test_with_fail_intercept() test only the origin node
// receiving simulated fail messages