diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 91575195a88..094c9bc38d1 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -130,7 +130,9 @@ fn test_monitor_and_persister_update_fail() {
 	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
-	if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2) {
+	if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id())
+		.unwrap().lock().unwrap().channel_by_id.get_mut(&chan.2)
+	{
 		if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			// Check that even though the persister is returning a TemporaryFailure,
 			// because the update is bogus, ultimately the error that's returned
@@ -1411,9 +1413,12 @@ fn monitor_failed_no_reestablish_response() {
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 	{
-		let mut lock;
-		get_channel_ref!(nodes[0], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
-		get_channel_ref!(nodes[1], lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		let mut node_0_per_peer_lock;
+		let mut node_0_peer_state_lock;
+		let mut node_1_per_peer_lock;
+		let mut node_1_peer_state_lock;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}

 	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 9b47770f120..0ba3c9af70e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -4828,8 +4828,9 @@
 		// the funding transaction is at least still in the mempool of most nodes).
 		//
 		// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
-		// 0-conf channel, but not doing so may lead to the `ChannelManager::short_to_id` map
-		// being inconsistent, so we currently have to.
+		// 0-conf channel, but not doing so may lead to the
+		// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
+		// to.
 		if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
 			let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
 				self.minimum_depth.unwrap(), funding_tx_confirmations);
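The test change above is the whole refactor in miniature: the single global `channel_state.lock().unwrap().by_id` lookup becomes a two-level lookup through `per_peer_state`. A minimal standalone sketch of that shape, with plain `std` types, a `u8` standing in for the real `PublicKey`, and a stub `Channel` (illustrative only, not LDK's actual types):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type ChannelId = [u8; 32];
struct Channel; // stub
struct PeerState { channel_by_id: HashMap<ChannelId, Channel> }

fn main() {
    // counterparty_node_id -> Mutex<PeerState>; each peer's channels live behind
    // that peer's own mutex rather than in one global by_id map.
    let per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>> = RwLock::new(HashMap::new());
    let (peer_id, chan_id) = (42u8, [0u8; 32]);
    per_peer_state.write().unwrap()
        .insert(peer_id, Mutex::new(PeerState { channel_by_id: HashMap::new() }));
    per_peer_state.write().unwrap()
        .get_mut(&peer_id).unwrap()
        .get_mut().unwrap() // &mut access through the write guard
        .channel_by_id.insert(chan_id, Channel);

    // The two-step lookup the test now performs: peer map first, then that
    // peer's channel map, holding the outer read lock plus the peer lock.
    let peers = per_peer_state.read().unwrap();
    if let Some(peer_mutex) = peers.get(&peer_id) {
        let mut peer_state = peer_mutex.lock().unwrap();
        if let Some(_channel) = peer_state.channel_by_id.get_mut(&chan_id) {
            // mutate the channel while both locks are held
        }
    }
}
```

One consequence visible in the test: contention moves from one process-wide channel map to per-peer mutexes, so operations on different peers' channels no longer serialize against each other.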
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index cf593068947..c4178d0e3ee 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -64,13 +64,14 @@
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use io::Read;
-use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
 #[cfg(any(test, feature = "std"))]
 use std::time::Instant;
+use std::any::Any;
 use util::crypto::sign;

 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -404,12 +405,13 @@ pub(super) enum RAACommitmentOrder {
 }

 // Note this is only exposed in cfg(test):
-pub(super) struct ChannelHolder<Signer: Sign> {
-	pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-	/// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
-	/// here once the channel is available for normal use, with SCIDs being added once the funding
-	/// transaction is confirmed at the channel's required confirmation depth.
-	pub(super) short_to_id: HashMap<u64, [u8; 32]>,
+pub(super) struct ChannelHolder {
+	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+	///
+	/// Outbound SCID aliases are added here once the channel is available for normal use, with
+	/// SCIDs being added once the funding transaction is confirmed at the channel's required
+	/// confirmation depth.
+	pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
 	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
 	///
 	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
@@ -441,9 +443,15 @@ enum BackgroundEvent {
 	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
 }

-/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
-/// the latest Init features we heard from the peer.
-struct PeerState {
+/// State we hold per-peer.
+pub(super) struct PeerState<Signer: Sign> {
+	/// `temporary_channel_id` or `channel_id` -> `channel`.
+	///
+	/// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
+	/// `channel_id`, the `temporary_channel_id` key in the map is replaced by the `channel_id`.
+	pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
+	/// The latest `InitFeatures` we heard from the peer.
 	latest_features: InitFeatures,
 }

@@ -706,9 +714,9 @@ pub struct ChannelManager
 	#[cfg(any(test, feature = "_test_utils"))]
-	pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
+	pub(super) channel_state: Mutex<ChannelHolder>,
 	#[cfg(not(any(test, feature = "_test_utils")))]
-	channel_state: Mutex<ChannelHolder<Signer>>,
+	channel_state: Mutex<ChannelHolder>,

 	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
 	/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
@@ -737,6 +745,25 @@ pub struct ChannelManager
 	pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
+	/// `channel_id` -> `counterparty_node_id`.
+	///
+	/// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
+	/// multiple channels with the same `temporary_channel_id` to different peers can exist,
+	/// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+	///
+	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
+	/// the corresponding channel for the event, as we only have access to the `channel_id` during
+	/// the handling of the events.
+	///
+	/// TODO:
+	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
+	/// to add the `counterparty_node_id` to `ChannelMonitor`s. However, adding it as a required
+	/// field in `ChannelMonitor`s would break backwards compatibility.
+	/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
+	/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
+	/// required to access the channel with the `counterparty_node_id`.
+	id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+
 	our_network_key: SecretKey,
 	our_network_pubkey: PublicKey,

@@ -758,15 +785,25 @@ pub struct ChannelManager
-	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+	#[cfg(not(any(test, feature = "_test_utils")))]
+	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<Signer>>>>,
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<Signer>>>>,

 	pending_events: Mutex<Vec<events::Event>>,
 	pending_background_events: Mutex<Vec<BackgroundEvent>>,
@@ -1229,9 +1266,9 @@ macro_rules! handle_error {
 }

 macro_rules! update_maps_on_chan_removal {
-	($self: expr, $short_to_id: expr, $channel: expr) => {
+	($self: expr, $short_to_chan_info: expr, $channel: expr) => {
 		if let Some(short_id) = $channel.get_short_channel_id() {
-			$short_to_id.remove(&short_id);
+			$short_to_chan_info.remove(&short_id);
 		} else {
 			// If the channel was never confirmed on-chain prior to its closure, remove the
 			// outbound SCID alias we used for it from the collision-prevention set. While we
@@ -1242,13 +1279,14 @@ macro_rules! update_maps_on_chan_removal {
 			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
 			debug_assert!(alias_removed);
 		}
-		$short_to_id.remove(&$channel.outbound_scid_alias());
+		$self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+		$short_to_chan_info.remove(&$channel.outbound_scid_alias());
 	}
 }

 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_chan_err {
-	($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+	($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => {
 		match $err {
 			ChannelError::Warn(msg) => {
 				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
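With the `id_to_peer` map added, `update_maps_on_chan_removal!` now has three auxiliary indexes to keep consistent whenever a channel is dropped: the real SCID entry, the outbound-alias entry, and the `channel_id -> peer` entry. A hedged sketch of that invariant, with placeholder types (`u8` for the node id, nothing here is LDK's real API):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

type ChannelId = [u8; 32];
type Scid = u64;

// Simplified stand-ins: the real maps live on ChannelHolder / ChannelManager.
struct Maps {
    short_to_chan_info: HashMap<Scid, (u8, ChannelId)>,
    id_to_peer: Mutex<HashMap<ChannelId, u8>>,
}

// Mirrors what update_maps_on_chan_removal! does: every index that can point
// at the channel must be cleared when the channel is removed.
fn on_channel_removal(maps: &mut Maps, scid: Option<Scid>, outbound_alias: Scid, chan_id: ChannelId) {
    if let Some(scid) = scid {
        maps.short_to_chan_info.remove(&scid);
    }
    maps.id_to_peer.lock().unwrap().remove(&chan_id);
    maps.short_to_chan_info.remove(&outbound_alias); // no-op if never inserted
}

fn main() {
    let mut maps = Maps { short_to_chan_info: HashMap::new(), id_to_peer: Mutex::new(HashMap::new()) };
    let chan_id = [1u8; 32];
    maps.short_to_chan_info.insert(7, (42, chan_id));
    maps.id_to_peer.lock().unwrap().insert(chan_id, 42);
    on_channel_removal(&mut maps, Some(7), 9999, chan_id);
    assert!(maps.short_to_chan_info.is_empty());
    assert!(maps.id_to_peer.lock().unwrap().is_empty());
}
```

Leaving a stale entry in any of these maps is exactly the inconsistency the channel.rs comment above worries about for reorged 0-conf/1-conf channels.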
@@ -1258,14 +1296,14 @@ macro_rules! convert_chan_err {
 			},
 			ChannelError::Close(msg) => {
 				log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-				update_maps_on_chan_removal!($self, $short_to_id, $channel);
+				update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
 				let shutdown_res = $channel.force_shutdown(true);
 				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
 			},
 			ChannelError::CloseDelayBroadcast(msg) => {
 				log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
-				update_maps_on_chan_removal!($self, $short_to_id, $channel);
+				update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
 				let shutdown_res = $channel.force_shutdown(false);
 				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
@@ -1279,7 +1317,7 @@ macro_rules! break_chan_entry {
 		match $res {
 			Ok(res) => res,
 			Err(e) => {
-				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
 				if drop {
 					$entry.remove_entry();
 				}
@@ -1294,7 +1332,7 @@ macro_rules! try_chan_entry {
 		match $res {
 			Ok(res) => res,
 			Err(e) => {
-				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
 				if drop {
 					$entry.remove_entry();
 				}
@@ -1308,18 +1346,18 @@ macro_rules! remove_channel {
 	($self: expr, $channel_state: expr, $entry: expr) => {
 		{
 			let channel = $entry.remove_entry().1;
-			update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
+			update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel);
 			channel
 		}
 	}
 }

 macro_rules! handle_monitor_err {
-	($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+	($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
 		match $err {
 			ChannelMonitorUpdateErr::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
-				update_maps_on_chan_removal!($self, $short_to_id, $chan);
+				update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
 				// TODO: $failed_fails is dropped here, which will cause other channels to hit the
 				// chain in a confused state! We need to move them into the ChannelMonitor which
 				// will be responsible for failing backwards once things confirm on-chain.
@@ -1359,7 +1397,7 @@ macro_rules! handle_monitor_err {
 		}
 	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-		let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+		let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
 		if drop {
 			$entry.remove_entry();
 		}
@@ -1405,19 +1443,19 @@ macro_rules! maybe_break_monitor_err {
 }

 macro_rules! send_channel_ready {
-	($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+	($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
 		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
 			node_id: $channel.get_counterparty_node_id(),
 			msg: $channel_ready_msg,
 		});
 		// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
 		// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
-		let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
-		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
+		let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
 			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
 		if let Some(real_scid) = $channel.get_short_channel_id() {
-			let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id());
-			assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(),
+			let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
 				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
 		}
 	}
 }
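The `send_channel_ready!` change is where the renamed map earns its new name: each SCID (or alias) now maps to a `(counterparty_node_id, channel_id)` pair, and re-insertion is tolerated only when it points at the very same pair (re-sends on reconnect), never at a different channel. A minimal sketch of that bookkeeping, again with a `u8` placeholder node id:

```rust
use std::collections::HashMap;

type ChannelId = [u8; 32];

// Re-insertion under the same SCID must be idempotent; pointing the SCID at a
// different (peer, channel) pair is a hard failure, as in the macro's assert.
fn insert_scid(map: &mut HashMap<u64, (u8, ChannelId)>, scid: u64, cp: u8, chan_id: ChannelId) {
    let prev = map.insert(scid, (cp, chan_id));
    assert!(
        prev.is_none() || prev.unwrap() == (cp, chan_id),
        "SCIDs should never collide"
    );
}

fn main() {
    let mut map = HashMap::new();
    let chan = [2u8; 32];
    insert_scid(&mut map, 12345, 7, chan);
    insert_scid(&mut map, 12345, 7, chan); // idempotent re-send on reconnect: fine
}
```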
@@ -1457,7 +1495,7 @@ macro_rules! handle_chan_restoration_locked {
 					// Similar to the above, this implies that we're letting the channel_ready fly
 					// before it should be allowed to.
 					assert!(chanmon_update.is_none());
-					send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+					send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 				}
 			}
 			if let Some(msg) = $announcement_sigs {
 				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1578,8 +1616,7 @@ impl ChannelMana
 			best_block: RwLock::new(params.best_block),

 			channel_state: Mutex::new(ChannelHolder{
-				by_id: HashMap::new(),
-				short_to_id: HashMap::new(),
+				short_to_chan_info: HashMap::new(),
 				forward_htlcs: HashMap::new(),
 				claimable_htlcs: HashMap::new(),
 				pending_msg_events: Vec::new(),
@@ -1587,6 +1624,7 @@ impl ChannelMana
 			outbound_scid_aliases: Mutex::new(HashSet::new()),
 			pending_inbound_payments: Mutex::new(HashMap::new()),
 			pending_outbound_payments: Mutex::new(HashMap::new()),
+			id_to_peer: Mutex::new(HashMap::new()),

 			our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
 			our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
@@ -1598,7 +1636,7 @@ impl ChannelMana
 			last_node_announcement_serial: AtomicUsize::new(0),
 			highest_seen_timestamp: AtomicUsize::new(0),

-			per_peer_state: RwLock::new(HashMap::new()),
+			per_peer_state: FairRwLock::new(HashMap::new()),

 			pending_events: Mutex::new(Vec::new()),
 			pending_background_events: Mutex::new(Vec::new()),
@@ -1666,12 +1704,18 @@ impl ChannelMana
 			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
 		}

-		let channel = {
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			match per_peer_state.get(&their_network_key) {
-				Some(peer_state) => {
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+		// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
+		debug_assert!(&self.total_consistency_lock.try_write().is_err());
+
+		let mut channel_state = self.channel_state.lock().unwrap();
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		match per_peer_state.get(&their_network_key) {
+			None => return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", their_network_key) }),
+			Some(peer_state) => {
+				let mut peer_state = peer_state.lock().unwrap();
+				let channel = {
 					let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-					let peer_state = peer_state.lock().unwrap();
 					let their_features = &peer_state.latest_features;
 					let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
 					match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
@@ -1684,89 +1728,91 @@ impl ChannelMana
 						return Err(e);
 					},
 				}
-			},
-			None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
-		}
-	};
-	let res = channel.get_open_channel(self.genesis_hash.clone());
+				};
+				let res = channel.get_open_channel(self.genesis_hash.clone());

-	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-	// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
-	debug_assert!(&self.total_consistency_lock.try_write().is_err());
+				let temporary_channel_id = channel.channel_id();

-	let temporary_channel_id = channel.channel_id();
-	let mut channel_state = self.channel_state.lock().unwrap();
-	match channel_state.by_id.entry(temporary_channel_id) {
-		hash_map::Entry::Occupied(_) => {
-			if cfg!(fuzzing) {
-				return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
-			} else {
-				panic!("RNG is bad???");
+				match peer_state.channel_by_id.entry(temporary_channel_id) {
+					hash_map::Entry::Occupied(_) => {
+						if cfg!(fuzzing) {
+							return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
+						} else {
+							panic!("RNG is bad???");
+						}
+					},
+					hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+				}
-			}
-		},
-		hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+
+				channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+					node_id: their_network_key,
+					msg: res,
+				});
+				Ok(temporary_channel_id)
+			}
 		}
-		channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
-			node_id: their_network_key,
-			msg: res,
-		});
-		Ok(temporary_channel_id)
 	}

-	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
+	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
 		let mut res = Vec::new();
 		{
 			let channel_state = self.channel_state.lock().unwrap();
-			res.reserve(channel_state.by_id.len());
-			for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
-				let balance = channel.get_available_balances();
-				let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
-					channel.get_holder_counterparty_selected_channel_reserve_satoshis();
-				res.push(ChannelDetails {
-					channel_id: (*channel_id).clone(),
-					counterparty: ChannelCounterparty {
-						node_id: channel.get_counterparty_node_id(),
-						features: InitFeatures::empty(),
-						unspendable_punishment_reserve: to_remote_reserve_satoshis,
-						forwarding_info: channel.counterparty_forwarding_info(),
-						// Ensures that we have actually received the `htlc_minimum_msat` value
-						// from the counterparty through the `OpenChannel` or `AcceptChannel`
-						// message (as they are always the first message from the counterparty).
-						// Else `Channel::get_counterparty_htlc_minimum_msat` could return the
-						// default `0` value set by `Channel::new_outbound`.
-						outbound_htlc_minimum_msat: if channel.have_received_message() {
-							Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
-						outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
-					},
-					funding_txo: channel.get_funding_txo(),
-					// Note that accept_channel (or open_channel) is always the first message, so
-					// `have_received_message` indicates that type negotiation has completed.
-					channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
-					short_channel_id: channel.get_short_channel_id(),
-					outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
-					inbound_scid_alias: channel.latest_inbound_scid_alias(),
-					channel_value_satoshis: channel.get_value_satoshis(),
-					unspendable_punishment_reserve: to_self_reserve_satoshis,
-					balance_msat: balance.balance_msat,
-					inbound_capacity_msat: balance.inbound_capacity_msat,
-					outbound_capacity_msat: balance.outbound_capacity_msat,
-					next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
-					user_channel_id: channel.get_user_id(),
-					confirmations_required: channel.minimum_depth(),
-					force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
-					is_outbound: channel.is_outbound(),
-					is_channel_ready: channel.is_usable(),
-					is_usable: channel.is_live(),
-					is_public: channel.should_announce(),
-					inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
-					inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat()
-				});
-			}
-		}
-		let per_peer_state = self.per_peer_state.read().unwrap();
-		for chan in res.iter_mut() {
-			if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
-				chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
+			// Allocate our best estimate of the number of channels we have in the `res`
+			// Vec. Sadly the `channel_state.short_to_chan_info` map doesn't cover channels without
+			// a scid or a scid alias, and the `id_to_peer` map shouldn't be used outside
+			// of the ChannelMonitor handling. Therefore reallocations may still occur, but this is
+			// unlikely, as the `channel_state.short_to_chan_info` map often contains 2 entries for
+			// the same channel.
+			res.reserve(channel_state.short_to_chan_info.len());
+
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				for (channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
+					let balance = channel.get_available_balances();
+					let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+						channel.get_holder_counterparty_selected_channel_reserve_satoshis();
+					res.push(ChannelDetails {
+						channel_id: (*channel_id).clone(),
+						counterparty: ChannelCounterparty {
+							node_id: channel.get_counterparty_node_id(),
+							features: peer_state.latest_features.clone(),
+							unspendable_punishment_reserve: to_remote_reserve_satoshis,
+							forwarding_info: channel.counterparty_forwarding_info(),
+							// Ensures that we have actually received the `htlc_minimum_msat` value
+							// from the counterparty through the `OpenChannel` or `AcceptChannel`
+							// message (as they are always the first message from the counterparty).
+							// Else `Channel::get_counterparty_htlc_minimum_msat` could return the
+							// default `0` value set by `Channel::new_outbound`.
+							outbound_htlc_minimum_msat: if channel.have_received_message() {
+								Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
+							outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+						},
+						funding_txo: channel.get_funding_txo(),
+						// Note that accept_channel (or open_channel) is always the first message, so
+						// `have_received_message` indicates that type negotiation has completed.
+						channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
+						short_channel_id: channel.get_short_channel_id(),
+						outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
+						inbound_scid_alias: channel.latest_inbound_scid_alias(),
+						channel_value_satoshis: channel.get_value_satoshis(),
+						unspendable_punishment_reserve: to_self_reserve_satoshis,
+						balance_msat: balance.balance_msat,
+						inbound_capacity_msat: balance.inbound_capacity_msat,
+						outbound_capacity_msat: balance.outbound_capacity_msat,
+						next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+						user_channel_id: channel.get_user_id(),
+						confirmations_required: channel.minimum_depth(),
+						force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+						is_outbound: channel.is_outbound(),
+						is_channel_ready: channel.is_usable(),
+						is_usable: channel.is_live(),
+						is_public: channel.should_announce(),
+						inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
+						inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat()
+					});
+				}
 			}
 		}
 		res
 	}
@@ -1816,51 +1862,47 @@ impl ChannelMana
 		let result: Result<(), _> = loop {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
-			match channel_state.by_id.entry(channel_id.clone()) {
-				hash_map::Entry::Occupied(mut chan_entry) => {
-					if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
-						return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
-					}
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
-						Some(peer_state) => {
-							let peer_state = peer_state.lock().unwrap();
-							let their_features = &peer_state.latest_features;
-							chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
-						},
-						None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
-					};
-					failed_htlcs = htlcs;
-
-					// Update the monitor with the shutdown script if necessary.
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				match peer_state.channel_by_id.entry(channel_id.clone()) {
+					hash_map::Entry::Occupied(mut chan_entry) => {
+						let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+						failed_htlcs = htlcs;

+						// Update the monitor with the shutdown script if necessary.
+						if let Some(monitor_update) = monitor_update {
+							if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+								let (result, is_permanent) =
+									handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+								if is_permanent {
+									remove_channel!(self, channel_state, chan_entry);
+									break result;
+								}
 							}
 						}
-					}

-					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: *counterparty_node_id,
-						msg: shutdown_msg
-					});
+						channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+							node_id: *counterparty_node_id,
+							msg: shutdown_msg
+						});

-					if chan_entry.get().is_shutdown() {
-						let channel = remove_channel!(self, channel_state, chan_entry);
-						if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
-							channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-								msg: channel_update
-							});
+						if chan_entry.get().is_shutdown() {
+							let channel = remove_channel!(self, channel_state, chan_entry);
+							if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+								channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+									msg: channel_update
+								});
+							}
+							self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
 						}
-						self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
-					}
-					break Ok(());
-				},
-				hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
+						break Ok(());
+					},
+					hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) })
+				}
+			} else {
+				return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
 			}
 		};
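Note the error split introduced here: an unknown peer is now an `APIMisuseError`, while a known peer without the requested channel stays a `ChannelUnavailable`. A hedged sketch of that two-level lookup-and-report pattern (placeholder types and error enum, not LDK's real `APIError`):

```rust
use std::collections::HashMap;

type ChannelId = [u8; 32];

#[derive(Debug)]
enum ApiError {
    ApiMisuseError(String),     // the peer itself is unknown
    ChannelUnavailable(String), // peer known, but no such channel for it
}

// Mirrors the shape of close_channel/force_close after the refactor; `u8`
// stands in for PublicKey and String for the channel object.
fn find_channel<'a>(
    peers: &'a HashMap<u8, HashMap<ChannelId, String>>, peer: u8, chan_id: &ChannelId,
) -> Result<&'a String, ApiError> {
    let peer_channels = peers.get(&peer).ok_or_else(|| {
        ApiError::ApiMisuseError(format!("Can't find a peer with node_id {}", peer))
    })?;
    peer_channels.get(chan_id).ok_or_else(|| {
        ApiError::ChannelUnavailable(format!("No such channel for peer {}", peer))
    })
}

fn main() {
    let peers: HashMap<u8, HashMap<ChannelId, String>> = HashMap::new();
    assert!(matches!(find_channel(&peers, 1, &[0u8; 32]), Err(ApiError::ApiMisuseError(_))));
}
```

This also removes the old "counterparty_node_id doesn't match the channel" check: with channels keyed under the peer, a mismatched id simply fails the first lookup.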
@@ -1938,18 +1980,22 @@ impl ChannelMana
 		let mut chan = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
-			if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
-				if chan.get().get_counterparty_node_id() != *peer_node_id {
-					return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
-				}
-				if let Some(peer_msg) = peer_msg {
-					self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) {
+				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
+					if let Some(peer_msg) = peer_msg {
+						self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+					} else {
+						self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
+					}
+					remove_channel!(self, channel_state, chan)
 				} else {
-					self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
+					return Err(APIError::ChannelUnavailable{ err: format!("No such channel for the passed counterparty_node_id {}", peer_node_id) });
 				}
-				remove_channel!(self, channel_state, chan)
 			} else {
-				return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
+				return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) });
 			}
 		};
 		log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
@@ -2091,7 +2137,7 @@ impl ChannelMana
 		})
 	}

-	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
+	fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
 		macro_rules! return_malformed_err {
 			($msg: expr, $err_code: expr) => {
 				{
@@ -2211,9 +2257,9 @@ impl ChannelMana
 		// with a short_channel_id of 0. This is important as various things later assume
 		// short_channel_id is non-0 in any ::Forward.
 		if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-			let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
+			let id_option = channel_state.as_ref().unwrap().short_to_chan_info.get(&short_channel_id).cloned();
 			if let Some((err, code, chan_update)) = loop {
-				let forwarding_id_opt = match id_option {
+				let forwarding_chan_info_opt = match id_option {
 					None => { // unknown_next_peer
 						// Note that this is likely a timing oracle for detecting whether an scid is a
 						// phantom.
 							break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 						}
 					},
-					Some(id) => Some(id.clone()),
+					Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
 				};
-				let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
-					let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+				let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+					let per_peer_state = self.per_peer_state.read().unwrap();
+					let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap();
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state = &mut *peer_state_lock;
+					let chan = peer_state.channel_by_id.get_mut(&forwarding_id).unwrap();
 					if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 						// Note that the behavior here should be identical to the above block - we
 						// should NOT reveal the existence or non-existence of a private channel if
@@ -2402,9 +2452,9 @@ impl ChannelMana
 			}
 		}

-		let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+		let (counterparty_node_id, id) = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) {
 			None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
-			Some(id) => id.clone(),
+			Some((cp_id, id)) => (cp_id.clone(), id.clone()),
 		};

 		macro_rules! insert_outbound_payment {
@@ -2423,54 +2473,59 @@
 		}

 		let channel_state = &mut *channel_lock;
-		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
-			match {
-				if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
-					return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
-				}
-				if !chan.get().is_live() {
-					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
-				}
-				break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
-					htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-						path: path.clone(),
-						session_priv: session_priv.clone(),
-						first_hop_htlc_msat: htlc_msat,
-						payment_id,
-						payment_secret: payment_secret.clone(),
-						payment_params: payment_params.clone(),
-					}, onion_packet, &self.logger),
-				channel_state, chan)
-			} {
-				Some((update_add, commitment_signed, monitor_update)) => {
-					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-						maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
-						// Note that MonitorUpdateFailed here indicates (per function docs)
-						// that we will resend the commitment update once monitor updating
-						// is restored. Therefore, we must return an error indicating that
-						// it is unsafe to retry the payment wholesale, which we do in the
-						// send_payment check for MonitorUpdateFailed, below.
-						insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
-						return Err(APIError::MonitorUpdateFailed);
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
+				match {
+					if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
+						return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
 					}
-					insert_outbound_payment!();
+					if !chan.get().is_live() {
+						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
+					}
+					break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+						htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+							path: path.clone(),
+							session_priv: session_priv.clone(),
+							first_hop_htlc_msat: htlc_msat,
+							payment_id,
+							payment_secret: payment_secret.clone(),
+							payment_params: payment_params.clone(),
+						}, onion_packet, &self.logger),
+					channel_state, chan)
+				} {
+					Some((update_add, commitment_signed, monitor_update)) => {
+						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+							maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+							// Note that MonitorUpdateFailed here indicates (per function docs)
+							// that we will resend the commitment update once monitor updating
+							// is restored. Therefore, we must return an error indicating that
+							// it is unsafe to retry the payment wholesale, which we do in the
+							// send_payment check for MonitorUpdateFailed, below.
+							insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
+							return Err(APIError::MonitorUpdateFailed);
+						}
+						insert_outbound_payment!();

-					log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
-					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: path.first().unwrap().pubkey,
-						updates: msgs::CommitmentUpdate {
-							update_add_htlcs: vec![update_add],
-							update_fulfill_htlcs: Vec::new(),
-							update_fail_htlcs: Vec::new(),
-							update_fail_malformed_htlcs: Vec::new(),
-							update_fee: None,
-							commitment_signed,
-						},
-					});
-				},
-				None => { insert_outbound_payment!(); },
-			}
-		} else { unreachable!(); }
+						log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: path.first().unwrap().pubkey,
+							updates: msgs::CommitmentUpdate {
+								update_add_htlcs: vec![update_add],
+								update_fulfill_htlcs: Vec::new(),
+								update_fail_htlcs: Vec::new(),
+								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: None,
+								commitment_signed,
+							},
+						});
+					},
+					None => { insert_outbound_payment!(); },
+				}
+			} else { return Err(APIError::ChannelUnavailable{err: format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_chan_info map", counterparty_node_id) })}
+		} else { return Err(APIError::ChannelUnavailable{err: format!("No such counterparty_node_id {}, as indicated by the short_to_chan_info map", counterparty_node_id) })}
 		return Ok(());
 	};
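Sending along a path now resolves the first hop in two steps: SCID to `(counterparty_node_id, channel_id)` via `short_to_chan_info`, then counterparty to channel via that peer's map. A minimal sketch of the lookup chain (placeholder types throughout; `String` stands in for the channel object):

```rust
use std::collections::HashMap;

type ChannelId = [u8; 32];

// SCID -> (counterparty, channel_id), then counterparty -> channels.
// Either step can fail independently, matching the two error paths above.
fn first_hop_channel<'a>(
    short_to_chan_info: &HashMap<u64, (u8, ChannelId)>,
    per_peer_channels: &'a HashMap<u8, HashMap<ChannelId, String>>,
    scid: u64,
) -> Option<&'a String> {
    let (cp, chan_id) = short_to_chan_info.get(&scid)?;
    per_peer_channels.get(cp)?.get(chan_id)
}

fn main() {
    let mut stc = HashMap::new();
    let mut peers: HashMap<u8, HashMap<ChannelId, String>> = HashMap::new();
    let chan_id = [3u8; 32];
    stc.insert(99u64, (5u8, chan_id));
    peers.entry(5).or_default().insert(chan_id, "channel".to_owned());
    assert!(first_hop_channel(&stc, &peers, 99).is_some());
}
```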
 	/// Handles the generation of a funding transaction, optionally (for tests) with a function
 	/// which checks the correctness of the funding transaction given the associated channel.
 	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>(
-		&self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+		&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
 	) -> Result<(), APIError> {
-		let (chan, msg) = {
-			let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
-				Some(mut chan) => {
-					let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-					(chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-						.map_err(|e| if let ChannelError::Close(msg) = e {
-							MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
-						} else { unreachable!(); })
-					, chan)
-				},
-				None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
+		let mut channel_state_lock = self.channel_state.lock().unwrap();
+		let channel_state = &mut *channel_state_lock;
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+
+			let (chan, msg) = {
+				let (res, chan) = {
+					match peer_state.channel_by_id.remove(temporary_channel_id) {
+						Some(mut chan) => {
+							let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+							(chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+								.map_err(|e| if let ChannelError::Close(msg) = e {
+									MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+								} else { unreachable!(); })
+							, chan)
+						},
+						None => { return Err(APIError::ChannelUnavailable { err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) }) },
+					}
+				};
+				match handle_error!(self, res, chan.get_counterparty_node_id()) {
+					Ok(funding_msg) => {
+						(chan, funding_msg)
+					},
+					Err(_) => { return Err(APIError::ChannelUnavailable {
+						err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+					}) },
+				}
 			};
-			match handle_error!(self, res, chan.get_counterparty_node_id()) {
-				Ok(funding_msg) => {
-					(chan, funding_msg)
-				},
-				Err(_) => { return Err(APIError::ChannelUnavailable {
-					err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
-				}) },
-			}
-		};

-		let mut channel_state = self.channel_state.lock().unwrap();
-		channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-			node_id: chan.get_counterparty_node_id(),
-			msg,
-		});
-		match channel_state.by_id.entry(chan.channel_id()) {
-			hash_map::Entry::Occupied(_) => {
-				panic!("Generated duplicate funding txid?");
-			},
-			hash_map::Entry::Vacant(e) => {
-				e.insert(chan);
+			channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+				node_id: chan.get_counterparty_node_id(),
+				msg,
+			});
+			let chan_id = chan.channel_id();
+			match peer_state.channel_by_id.entry(chan_id) {
+				hash_map::Entry::Occupied(_) => {
+					panic!("Generated duplicate funding txid?");
+				},
+				hash_map::Entry::Vacant(e) => {
+					e.insert(chan);
+					let mut id_to_peer = self.id_to_peer.lock().unwrap();
+					if id_to_peer.insert(chan_id, *counterparty_node_id).is_some() {
+						panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+					}
+				}
 			}
+			Ok(())
+		} else {
+			return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) })
 		}
-		Ok(())
 	}

 	#[cfg(test)]
@@ -2885,22 +2956,27 @@ impl ChannelMana
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
+		let per_peer_state = self.per_peer_state.read().unwrap();
 		let mut announced_chans = false;
-		for (_, chan) in channel_state.by_id.iter() {
-			if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
-				channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-					msg,
-					update_msg: match self.get_channel_update_for_broadcast(chan) {
-						Ok(msg) => msg,
-						Err(_) => continue,
-					},
-				});
-				announced_chans = true;
-			} else {
-				// If the channel is not public or has not yet reached channel_ready, check the
-				// next channel. If we don't yet have any public channels, we'll skip the broadcast
-				// below as peers may not accept it without channels on chain first.
+		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			for (_, chan) in peer_state.channel_by_id.iter() {
+				if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
+					channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+						msg,
+						update_msg: match self.get_channel_update_for_broadcast(chan) {
+							Ok(msg) => msg,
+							Err(_) => continue,
+						},
+					});
+					announced_chans = true;
+				} else {
+					// If the channel is not public or has not yet reached channel_ready, check the
+					// next channel. If we don't yet have any public channels, we'll skip the broadcast
+					// below as peers may not accept it without channels on chain first.
+				}
 			}
 		}

@@ -2928,11 +3004,38 @@ impl ChannelMana
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
+			let per_peer_state = self.per_peer_state.read().unwrap();
+
+			let mut htlcs_msgs_by_id: HashMap<[u8; 32], (Vec<msgs::UpdateAddHTLC>, Vec<msgs::UpdateFailHTLC>, PublicKey)> = HashMap::new();
+
+			macro_rules! add_channel_key {
+				($channel_id: expr, $counterparty_node_id: expr) => {{
+					if !htlcs_msgs_by_id.contains_key(&$channel_id){
+						htlcs_msgs_by_id.insert($channel_id, (Vec::new(), Vec::new(), $counterparty_node_id));
+					}
+				}}
+			}
+
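The `htlcs_msgs_by_id` map introduced here changes the forwarding loop's shape: instead of building a commitment inline per channel entry, messages are buffered per `channel_id` and flushed after the loop. A sketch of that accumulate-then-flush pattern; it uses the plain `HashMap` entry API rather than the `add_channel_key!`/`add_update_htlc_msg!` macros and their `Any` downcasts, and `String` stands in for the real `UpdateAddHTLC`/`UpdateFailHTLC` messages:

```rust
use std::collections::HashMap;

type ChannelId = [u8; 32];

fn main() {
    // channel_id -> (pending adds, pending fails, counterparty id).
    let mut htlcs_msgs_by_id: HashMap<ChannelId, (Vec<String>, Vec<String>, u8)> = HashMap::new();
    let chan_id = [4u8; 32];
    let counterparty = 9u8;

    // What add_channel_key! + add_update_htlc_msg! amount to, as plain code:
    let entry = htlcs_msgs_by_id.entry(chan_id).or_insert((Vec::new(), Vec::new(), counterparty));
    entry.0.push("update_add_htlc".to_owned());
    entry.1.push("update_fail_htlc".to_owned());

    // After draining all forwards, one commitment update per channel:
    for (chan_id, (adds, fails, cp)) in htlcs_msgs_by_id.into_iter() {
        println!("channel {:02x?}…: {} adds, {} fails for peer {}", &chan_id[..2], adds.len(), fails.len(), cp);
    }
}
```

Batching this way keeps each channel's `send_commitment` call in one place even though HTLCs for it may now be discovered while iterating several peers' maps.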
+			macro_rules! add_update_htlc_msg {
+				($htlc_msg: expr, $channel_id: expr, $counterparty_node_id: expr) => {{
+					add_channel_key!($channel_id, $counterparty_node_id);
+					if let hash_map::Entry::Occupied(mut entry) = htlcs_msgs_by_id.entry($channel_id) {
+						let msgs_entry = entry.get_mut();
+						if let Some(msg) = (&$htlc_msg as &Any).downcast_ref::<msgs::UpdateAddHTLC>() {
+							msgs_entry.0.push(msg.clone());
+						} else if let Some(msg) = (&$htlc_msg as &Any).downcast_ref::<msgs::UpdateFailHTLC>() {
+							msgs_entry.1.push(msg.clone());
+						} else {
+							panic!("Only UpdateAddHTLC or UpdateFailHTLC msgs supported for add_update_htlc_msg");
+						}
+					}
+				}}
+			}

 			for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
 				if short_chan_id != 0 {
-					let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
-						Some(chan_id) => chan_id.clone(),
+					let (counterparty_node_id, forward_chan_id) = match channel_state.short_to_chan_info.get(&short_chan_id) {
+						Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
 						None => {
 							for forward_info in pending_forwards.drain(..) {
 								match forward_info {
@@ -3002,129 +3105,117 @@ impl ChannelMana
 							continue;
 						}
 					};
-					if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
-						let mut add_htlc_msgs = Vec::new();
-						let mut fail_htlc_msgs = Vec::new();
-						for forward_info in pending_forwards.drain(..) {
-							match forward_info {
-								HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-										routing: PendingHTLCRouting::Forward {
-											onion_packet, ..
-										}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
-										prev_funding_outpoint } => {
-									log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
-									let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-										short_channel_id: prev_short_channel_id,
-										outpoint: prev_funding_outpoint,
-										htlc_id: prev_htlc_id,
-										incoming_packet_shared_secret: incoming_shared_secret,
-										// Phantom payments are only PendingHTLCRouting::Receive.
-										phantom_shared_secret: None,
-									});
-									match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
-										Err(e) => {
-											if let ChannelError::Ignore(msg) = e {
-												log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
-											} else {
-												panic!("Stated return value requirements in send_htlc() were not met");
+					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+						let peer_state = &mut *peer_state_lock;
+						if peer_state.channel_by_id.contains_key(&forward_chan_id) {
+							for forward_info in pending_forwards.drain(..) {
+								match forward_info {
+									HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+											routing: PendingHTLCRouting::Forward {
+												onion_packet, ..
+											}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
+											prev_funding_outpoint } => {
+										log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
+										let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+											short_channel_id: prev_short_channel_id,
+											outpoint: prev_funding_outpoint,
+											htlc_id: prev_htlc_id,
+											incoming_packet_shared_secret: incoming_shared_secret,
+											// Phantom payments are only PendingHTLCRouting::Receive.
+											phantom_shared_secret: None,
+										});
+
+										// Attempt to forward the HTLC over all available channels
+										// to the peer, but attempt to forward the HTLC over the
+										// channel specified in the onion payload first.
+										let counterparty_channel_ids = core::iter::once(forward_chan_id)
+											.chain(peer_state.channel_by_id.keys()
+												.filter(|&chan_id| *chan_id != forward_chan_id)
+												.map(|&chan_id| chan_id))
+											.collect::<Vec<_>>();
+										let mut send_succeeded = false;
+										for chan_id in counterparty_channel_ids {
+											match peer_state.channel_by_id.get_mut(&chan_id).unwrap().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet.clone(), &self.logger) {
+												Err(e) => {
+													if let ChannelError::Ignore(msg) = e {
+														log_trace!(
+															self.logger,
+															"Could not forward HTLC with payment_hash {}, over channel {} to peer {}. Will attempt to forward the HTLC over a substitute channel instead if possible. Reason: {}",
+															log_bytes!(payment_hash.0), log_bytes!(chan_id), counterparty_node_id, msg
+														);
+													} else {
+														panic!("Stated return value requirements in send_htlc() were not met");
+													}
+												},
+												Ok(update_add) => {
+													match update_add {
+														Some(msg) => {
+															log_info!(self.logger, "Will forward HTLC with payment_hash {}, over channel {}", log_bytes!(payment_hash.0), log_bytes!(chan_id));
+															add_update_htlc_msg!(msg, chan_id, counterparty_node_id);
+														},
+														None => {
+															// Nothing to do here...we're waiting on a remote
+															// revoke_and_ack before we can add anymore HTLCs. The Channel
+															// will automatically handle building the update_add_htlc and
+															// commitment_signed messages when we can.
+															// TODO: Do some kind of timer to set the channel as !is_live()
+															// as we don't really want others relying on us relaying through
+															// this channel currently :/.
+														}
+													}
+													send_succeeded = true;
+													break;
+												}
 											}
-											let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+										}
+										if !send_succeeded {
+											log_trace!(self.logger, "Failed to forward HTLC with payment_hash {} over all of the available channels to the peer", log_bytes!(payment_hash.0));
+											let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, peer_state.channel_by_id.get(&forward_chan_id).unwrap());
 											failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::Reason { failure_code, data }
 											));
 											continue;
-										},
-										Ok(update_add) => {
-											match update_add {
-												Some(msg) => { add_htlc_msgs.push(msg); },
-												None => {
-													// Nothing to do here...we're waiting on a remote
-													// revoke_and_ack before we can add anymore HTLCs. The Channel
-													// will automatically handle building the update_add_htlc and
-													// commitment_signed messages when we can.
-													// TODO: Do some kind of timer to set the channel as !is_live()
-													// as we don't really want others relying on us relaying through
-													// this channel currently :/.
-												}
-											}
 										}
-									}
-								},
-								HTLCForwardInfo::AddHTLC { .. } => {
-									panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
-								},
-								HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-									log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-									match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
-										Err(e) => {
-											if let ChannelError::Ignore(msg) = e {
-												log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-											} else {
-												panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+									},
+									HTLCForwardInfo::AddHTLC { .. } => {
+										panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+									},
+									HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+										log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+										match peer_state.channel_by_id.get_mut(&forward_chan_id).unwrap().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
+											Err(e) => {
+												if let ChannelError::Ignore(msg) = e {
+													log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+												} else {
+													panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+												}
+												// fail-backs are best-effort, we probably already have one
+												// pending, and if not that's OK, if not, the channel is on
+												// the chain and sending the HTLC-Timeout is their problem.
+												continue;
+											},
+											Ok(Some(msg)) => { add_update_htlc_msg!(msg, forward_chan_id, counterparty_node_id); },
+											Ok(None) => {
+												// Nothing to do here...we're waiting on a remote
+												// revoke_and_ack before we can update the commitment
+												// transaction. The Channel will automatically handle
+												// building the update_fail_htlc and commitment_signed
+												// messages when we can.
+												// We don't need any kind of timer here as they should fail
+												// the channel onto the chain if they can't get our
+												// update_fail_htlc in time, it's not our problem.
 											}
-											// fail-backs are best-effort, we probably already have one
-											// pending, and if not that's OK, if not, the channel is on
-											// the chain and sending the HTLC-Timeout is their problem.
-											continue;
-										},
-										Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-										Ok(None) => {
-											// Nothing to do here...we're waiting on a remote
-											// revoke_and_ack before we can update the commitment
-											// transaction. The Channel will automatically handle
-											// building the update_fail_htlc and commitment_signed
-											// messages when we can.
-											// We don't need any kind of timer here as they should fail
-											// the channel onto the chain if they can't get our
-											// update_fail_htlc in time, it's not our problem.
 										}
-									}
-								},
-							}
-						}
-
-						if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-							let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
-								Ok(res) => res,
-								Err(e) => {
-									// We surely failed send_commitment due to bad keys, in that case
-									// close channel and then send error message to peer.
-									let counterparty_node_id = chan.get().get_counterparty_node_id();
-									let err: Result<(), _> = match e {
-										ChannelError::Ignore(_) | ChannelError::Warn(_) => {
-											panic!("Stated return value requirements in send_commitment() were not met");
-										}
-										ChannelError::Close(msg) => {
-											log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
-											let mut channel = remove_channel!(self, channel_state, chan);
-											// ChannelClosed event is generated by handle_error for us.
-											Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
-										},
-										ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
-									};
-									handle_errors.push((counterparty_node_id, err));
-									continue;
-								}
-							};
-							if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-								handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-								continue;
-							}
-							log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
-								add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
-							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: chan.get().get_counterparty_node_id(),
-								updates: msgs::CommitmentUpdate {
-									update_add_htlcs: add_htlc_msgs,
-									update_fulfill_htlcs: Vec::new(),
-									update_fail_htlcs: fail_htlc_msgs,
-									update_fail_malformed_htlcs: Vec::new(),
-									update_fee: None,
-									commitment_signed: commitment_msg,
-								},
-							});
+						} else {
+							let err = Err(MsgHandleErrInternal::send_err_msg_no_close(format!("No such channel for the counterparty_node_id {}, as indicated by the short_to_chan_info map", counterparty_node_id), forward_chan_id));
+							handle_errors.push((counterparty_node_id, err));
 						}
-					} else {
+					}
+					else {
 						unreachable!();
 					}
 				} else {
@@ -3302,6 +3393,53 @@ impl ChannelMana
 					}
 				}
 			}
+			for (chan_id, (add_htlc_msgs, fail_htlc_msgs, counterparty_node_id)) in htlcs_msgs_by_id.into_iter() {
+				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state = &mut *peer_state_lock;
+					if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+						let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
+							Ok(res) => res,
+							Err(e) => {
+								// We surely failed send_commitment due to bad keys, in that case
+								// close channel and then send error message to peer.
+								let counterparty_node_id = chan.get().get_counterparty_node_id();
+								let err: Result<(), _> = match e {
+									ChannelError::Ignore(_) | ChannelError::Warn(_) => {
+										panic!("Stated return value requirements in send_commitment() were not met");
+									}
+									ChannelError::Close(msg) => {
+										log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
+										let mut channel = remove_channel!(self, channel_state, chan);
+										// ChannelClosed event is generated by handle_error for us.
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + }, + ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } + }; + handle_errors.push((counterparty_node_id, err)); + continue; + } + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); + continue; + } + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: add_htlc_msgs, + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: fail_htlc_msgs, + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: commitment_msg, + }, + }); + } + } + } } for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { @@ -3349,7 +3487,7 @@ impl ChannelMana self.process_background_events(); } - fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + fn update_channel_fee(&self, short_to_chan_info: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { @@ -3369,7 +3507,7 @@ impl ChannelMana let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { Ok(res) => Ok(res), Err(e) => { - let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); if drop { retain_channel = false; } Err(res) } @@ -3377,7 +3515,7 @@ impl ChannelMana let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); + let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); if drop { retain_channel = false; } res } else { @@ -3417,15 +3555,20 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; - channel_state.by_id.retain(|chan_id, chan| { - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == 
NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push(err); - } - retain_channel - }); + let short_to_chan_info = &mut channel_state.short_to_chan_info; + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } } should_persist @@ -3455,50 +3598,54 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; - channel_state.by_id.retain(|chan_id, chan| { - let counterparty_node_id = chan.get_counterparty_node_id(); - let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - if err.is_err() { - handle_errors.push((err, counterparty_node_id)); - } - if !retain_channel { return false; } + let short_to_chan_info = &mut channel_state.short_to_chan_info; + let per_peer_state = self.per_peer_state.read().unwrap(); + for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, *counterparty_node_id)); + } + if !retain_channel { return false; } - if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); - handle_errors.push((Err(err), chan.get_counterparty_node_id())); - if needs_close { return false; } - } + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = 
NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } - true - }); + true + }); + } channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { if htlcs.is_empty() { @@ -3616,19 +3763,29 @@ impl ChannelMana // be surfaced to the user. fn fail_holding_cell_htlcs( &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], - _counterparty_node_id: &PublicKey + counterparty_node_id: &PublicKey ) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { match htlc_src { HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => { - let (failure_code, onion_failure_data) = - match self.channel_state.lock().unwrap().by_id.entry(channel_id) { - hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) - }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) - }; let channel_state = self.channel_state.lock().unwrap(); + let (failure_code, onion_failure_data) = { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id) { + hash_map::Entry::Occupied(chan_entry) => { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + }, + hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + } + } else { + // Peer must have been dropped before acquiring the per_peer_state + // read lock again in this function. + (0x4000|10, Vec::new()) + } + }; self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, @@ -3683,7 +3840,7 @@ impl ChannelMana /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). 
In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. We should make this async and move it into the forward HTLCs @@ -3875,7 +4032,7 @@ impl ChannelMana let mut expected_amt_msat = None; let mut valid_mpp = true; for htlc in sources.iter() { - if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { + if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } @@ -3961,64 +4118,69 @@ impl ChannelMana } } - fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { + fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let channel_state = &mut **channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { - Some(chan_id) => chan_id.clone(), + let per_peer_state = self.per_peer_state.read().unwrap(); + let (counterparty_node_id, chan_id) = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) { + Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), None => { return ClaimFundsFromHop::PrevHopForceClosed } }; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { - match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { - Ok(msgs_monitor_option) => { - if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { + match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { + Ok(msgs_monitor_option) => { + if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + return ClaimFundsFromHop::MonitorUpdateFail( + chan.get().get_counterparty_node_id(), + handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), + Some(htlc_value_msat) + ); + } + if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + 
log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + return ClaimFundsFromHop::Success(htlc_value_msat); + } else { + return ClaimFundsFromHop::DuplicateClaim; + } + }, + Err((e, monitor_update)) => { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, - "Failed to update channel monitor with preimage {:?}: {:?}", + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", payment_preimage, e); - return ClaimFundsFromHop::MonitorUpdateFail( - chan.get().get_counterparty_node_id(), - handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), - Some(htlc_value_msat) - ); } - if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", - log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); + if drop { + chan.remove_entry(); } - return ClaimFundsFromHop::Success(htlc_value_msat); - } else { - return ClaimFundsFromHop::DuplicateClaim; - } - }, - Err((e, monitor_update)) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, - "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", - payment_preimage, e); - } - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id); - if drop { - chan.remove_entry(); - } - return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); - }, - } + return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); + }, + } + } else { return ClaimFundsFromHop::PrevHopForceClosed } } else { unreachable!(); } } @@ -4048,7 +4210,7 @@ impl ChannelMana } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: 
Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { mem::drop(channel_state_lock); @@ -4166,9 +4328,24 @@ impl ChannelMana let (mut pending_failures, finalized_claims) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { - hash_map::Entry::Occupied(chan) => chan, - hash_map::Entry::Vacant(_) => return, + let counterparty_node_id = { + let id_to_peer = self.id_to_peer.lock().unwrap(); + id_to_peer.get(&funding_txo.to_channel_id()).cloned() + }; + if counterparty_node_id.is_none() { + return + } + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut peer_state_lock; + let mut channel = { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id.unwrap()) { + peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) { + hash_map::Entry::Occupied(chan) => chan, + hash_map::Entry::Vacant(_) => return, + } + } else { return } }; if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { return; @@ -4248,36 +4425,40 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut channel) => { - if !channel.get().inbound_is_awaiting_accept() { - return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); - } - if *counterparty_node_id != channel.get().get_counterparty_node_id() { - return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() }); - } - if accept_0conf { - channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { + hash_map::Entry::Occupied(mut channel) => { + if !channel.get().inbound_is_awaiting_accept() { + return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); + } + if accept_0conf { + channel.get_mut().set_0conf(); + } else if channel.get().get_channel_type().requires_zero_conf() { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.get().get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } - } - }; - channel_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel_state, channel); - return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + msg: channel.get_mut().accept_inbound_channel(user_channel_id), + }); + } + hash_map::Entry::Vacant(_) => { + return Err(APIError::APIMisuseError { err: format!("No such channel for the passed counterparty_node_id {}", counterparty_node_id) }); } - - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), - msg: channel.get_mut().accept_inbound_channel(user_channel_id), - }); - } - hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() }); } + } else { + return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) }); } Ok(()) } @@ -4304,35 +4485,42 @@ impl ChannelMana }; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(channel.channel_id()) { - hash_map::Entry::Occupied(_) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())) - }, - hash_map::Entry::Vacant(entry) => { - if !self.default_configuration.manually_accept_inbound_channels { - if channel.get_channel_type().requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(0), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push( - events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.get_channel_type().clone(), + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel.channel_id()) { + hash_map::Entry::Occupied(_) => { + self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) + }, + hash_map::Entry::Vacant(entry) => { + if !self.default_configuration.manually_accept_inbound_channels { + if channel.get_channel_type().requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - ); - } + 
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(0), + }); + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push( + events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: channel.get_channel_type().clone(), + } + ); + } - entry.insert(channel); + entry.insert(channel); + } } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())) } Ok(()) } @@ -4341,15 +4529,19 @@ impl ChannelMana let (value, output_script, user_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id) { + hash_map::Entry::Occupied(mut chan) => { + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan); + (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; let mut pending_events = self.pending_events.lock().unwrap(); @@ -4368,18 +4560,22 @@ impl ChannelMana let best_block = *self.best_block.read().unwrap(); let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); - } - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.temporary_channel_id.clone()) { + hash_map::Entry::Occupied(mut chan) => { + (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } }; // Because we have exclusive ownership of the channel here we can release the channel_state - // lock before watch_channel + // and peer_state lock before watch_channel if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { match e { ChannelMonitorUpdateErr::PermanentFailure => { @@ -4406,21 +4602,31 @@ impl ChannelMana } let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(funding_msg.channel_id) { - hash_map::Entry::Occupied(_) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) - }, - hash_map::Entry::Vacant(e) => { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { - node_id: counterparty_node_id.clone(), - msg: funding_msg, - }); - if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg); + let per_peer_state = self.per_peer_state.read().unwrap(); + let channel_id = funding_msg.channel_id; + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id) { + hash_map::Entry::Occupied(_) => { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), channel_id)) + }, + hash_map::Entry::Vacant(e) => { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + node_id: counterparty_node_id.clone(), + msg: funding_msg, + }); + if let Some(msg) = channel_ready { + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg); + } + e.insert(chan); + let mut id_to_peer = self.id_to_peer.lock().unwrap(); + if id_to_peer.insert(channel_id, *counterparty_node_id).is_some() { + panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + } } - e.insert(chan); } - } + } else { return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("The peer with node_id {} was dropped before the signed funding tx was sent to the peer", counterparty_node_id), channel_id)) } Ok(()) }
RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { - // We weren't able to watch the channel to begin with, so no updates should be made on - // it. Previously, full_stack_target found an (unreachable) panic when the - // monitor update contained within `shutdown_finish` was applied. - if let Some((ref mut shutdown_finish, _)) = shutdown_finish { - shutdown_finish.0.take(); + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) { + Ok(update) => update, + Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), + }; + if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { + let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. + if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } } + return res } - return res - } - if let Some(msg) = channel_ready { - send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg); - } - funding_tx - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + if let Some(msg) = channel_ready { + send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); + } + funding_tx + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid()); @@ -4466,36 +4676,40 @@ impl ChannelMana fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), - self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); - if let Some(announcement_sigs) = announcement_sigs_opt { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: counterparty_node_id.clone(), - msg: announcement_sigs, - }); - } else if chan.get().is_usable() { - // If we're sending an announcement_signatures, we'll send the (public) - // channel_update after sending a channel_announcement when we receive our - // counterparty's announcement_signatures. Thus, we only bother to send a - // channel_update here if the channel is not public, i.e. we're not sending an - // announcement_signatures. - log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(), + self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan); + if let Some(announcement_sigs) = announcement_sigs_opt { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), - msg, + msg: announcement_sigs, }); + } else if chan.get().is_usable() { + // If we're sending an announcement_signatures, we'll send the (public) + // channel_update after sending a channel_announcement when we receive our + // counterparty's announcement_signatures. Thus, we only bother to send a + // channel_update here if the channel is not public, i.e. we're not sending an + // announcement_signatures. 
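Note that the explicit `chan.get().get_counterparty_node_id() != *counterparty_node_id` guard from the removed lines disappears in every converted handler: once the lookup is scoped to the sender's own map, a message from the wrong node can only ever miss. A minimal sketch of the two shapes (stand-in types, not the LDK API):

use std::collections::HashMap;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];
struct Channel { counterparty: NodeId }

// Flat map: the channel found by id may belong to any peer, so each handler
// had to re-check the counterparty against the message sender.
fn lookup_flat<'a>(by_id: &'a mut HashMap<ChannelId, Channel>, id: &ChannelId, sender: &NodeId) -> Option<&'a mut Channel> {
    let chan = by_id.get_mut(id)?;
    if &chan.counterparty != sender { return None; }
    Some(chan)
}

// Per-peer map: indexing by the sender first makes the check structural.
fn lookup_per_peer<'a>(per_peer: &'a mut HashMap<NodeId, HashMap<ChannelId, Channel>>, id: &ChannelId, sender: &NodeId) -> Option<&'a mut Channel> {
    per_peer.get_mut(sender)?.get_mut(id)
}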
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: counterparty_node_id.clone(), + msg, + }); + } } - } - Ok(()) - }, - hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + Ok(()) + }, + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } @@ -4505,43 +4719,47 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - - if !chan_entry.get().received_shutdown() { - log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", - log_bytes!(msg.channel_id), - if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); - } + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } - let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); - dropped_htlcs = htlcs; - - // Update the monitor with the shutdown script if necessary. - if let Some(monitor_update) = monitor_update { - if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { - let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, channel_state, chan_entry); - break result; + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); + dropped_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. 
+ if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + if is_permanent { + remove_channel!(self, channel_state, chan_entry); + break result; + } } } - } - if let Some(msg) = shutdown { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: *counterparty_node_id, - msg, - }); - } + if let Some(msg) = shutdown { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } - break Ok(()); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + break Ok(()); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; for htlc_source in dropped_htlcs.drain(..) { @@ -4556,28 +4774,32 @@ impl ChannelMana let (tx, chan_option) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id.clone()) { - hash_map::Entry::Occupied(mut chan_entry) => { - if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry); - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } - if tx.is_some() { - // We're done with this channel, we've got a signed closing transaction and - // will send the closing_signed back to the remote peer upon return. This - // also implies there are no pending HTLCs left on the channel, so we can - // fully delete it from tracking (the channel monitor is still around to - // watch for old state broadcasts)! 
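Fully deleting a channel now means clearing every index that still references it: the per-peer `channel_by_id`, `short_to_chan_info`, and the new `id_to_peer` map, all in the same critical section, which is presumably what the `remove_channel!` call just below encapsulates (its body is not shown in this section). A self-contained sketch of that bookkeeping, with stand-in types and the maps flattened into one struct:

use std::collections::HashMap;

type NodeId = [u8; 33];
type ChannelId = [u8; 32];
struct Channel { scid: Option<u64>, counterparty: NodeId }

struct Indexes {
    channel_by_id: HashMap<ChannelId, Channel>,            // per-peer in the real code
    short_to_chan_info: HashMap<u64, (NodeId, ChannelId)>,
    id_to_peer: HashMap<ChannelId, NodeId>,
}

// If any index outlives the channel, later SCID or monitor-driven lookups
// would dangle, so all removals happen under the same lock scope.
fn remove_channel(ix: &mut Indexes, id: &ChannelId) -> Option<Channel> {
    let chan = ix.channel_by_id.remove(id)?;
    if let Some(scid) = chan.scid {
        ix.short_to_chan_info.remove(&scid);
    }
    debug_assert_eq!(ix.id_to_peer.remove(id), Some(chan.counterparty));
    Some(chan)
}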
- (tx, Some(remove_channel!(self, channel_state, chan_entry))) - } else { (tx, None) } - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id.clone()) { + hash_map::Entry::Occupied(mut chan_entry) => { + let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry); + if let Some(msg) = closing_signed { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: counterparty_node_id.clone(), + msg, + }); + } + if tx.is_some() { + // We're done with this channel, we've got a signed closing transaction and + // will send the closing_signed back to the remote peer upon return. This + // also implies there are no pending HTLCs left on the channel, so we can + // fully delete it from tracking (the channel monitor is still around to + // watch for old state broadcasts)! + (tx, Some(remove_channel!(self, channel_state, chan_entry))) + } else { (tx, None) } + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; if let Some(broadcast_tx) = tx { @@ -4609,37 +4831,41 @@ impl ChannelMana let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { - let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { - // If the update_add is completely bogus, the call will Err and we will close, - // but if we've sent a shutdown and they haven't acknowledged it yet, we just - // want to reject the new HTLC and fail it backwards instead of forwarding. - match pending_forward_info { - PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. 
}) => { - let reason = if (error_code & 0x1000) != 0 { - let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) - } else { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) - }; - let msg = msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason - }; - PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) - }, - _ => pending_forward_info - } - }; - try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { + // If the update_add is completely bogus, the call will Err and we will close, + // but if we've sent a shutdown and they haven't acknowledged it yet, we just + // want to reject the new HTLC and fail it backwards instead of forwarding. + match pending_forward_info { + PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => { + let reason = if (error_code & 0x1000) != 0 { + let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan); + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data) + } else { + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) + }; + let msg = msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason + }; + PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg)) + }, + _ => pending_forward_info + } + }; + try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } Ok(()) } @@ -4648,14 +4874,18 @@ impl ChannelMana let mut channel_lock = self.channel_state.lock().unwrap(); let (htlc_source, forwarded_htlc_value) = { let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); @@ -4665,14 +4895,18 @@ impl ChannelMana fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for 
a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4680,64 +4914,72 @@ impl ChannelMana fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - if (msg.failure_code & 0x8000) == 0 { - let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); - try_chan_entry!(self, Err(chan_err), channel_state, chan); - } - try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); - Ok(()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if (msg.failure_code & 0x8000) == 0 { + let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); + try_chan_entry!(self, Err(chan_err), channel_state, chan); + } + try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan); + Ok(()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let (revoke_and_ack, commitment_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.logger) { - Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), - Err((Some(update), e)) => { - assert!(chan.get().is_awaiting_monitor_update()); - let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); - try_chan_entry!(self, Err(e), channel_state, chan); - unreachable!(); - }, - Ok(res) => res - }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - } - channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { - node_id: counterparty_node_id.clone(), - msg: revoke_and_ack, - }); - if let Some(msg) = commitment_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { + Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), + Err((Some(update), e)) => { + assert!(chan.get().is_awaiting_monitor_update()); + let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); + try_chan_entry!(self, Err(e), channel_state, chan); + unreachable!(); + }, + Ok(res) => res + }; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: msg, - }, + msg: revoke_and_ack, }); - } - Ok(()) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + if let Some(msg) = commitment_signed { + 
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id.clone(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: msg, + }, + }); + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } } @@ -4784,45 +5026,49 @@ impl ChannelMana let res = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let raa_updates = break_chan_entry!(self, - chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); - htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { - if was_frozen_for_monitor { - assert!(raa_updates.commitment_update.is_none()); - assert!(raa_updates.accepted_htlcs.is_empty()); - assert!(raa_updates.failed_htlcs.is_empty()); - assert!(raa_updates.finalized_claimed_htlcs.is_empty()); - break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); - } else { - if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, - RAACommitmentOrder::CommitmentFirst, false, - raa_updates.commitment_update.is_some(), false, - raa_updates.accepted_htlcs, raa_updates.failed_htlcs, - raa_updates.finalized_claimed_htlcs) { - break Err(e); - } else { unreachable!(); } + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); + let raa_updates = break_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); + htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { + if was_frozen_for_monitor { + assert!(raa_updates.commitment_update.is_none()); + assert!(raa_updates.accepted_htlcs.is_empty()); + assert!(raa_updates.failed_htlcs.is_empty()); + assert!(raa_updates.finalized_claimed_htlcs.is_empty()); + break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); + } else { + if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, + 
raa_updates.commitment_update.is_some(), false, + raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs) { + break Err(e); + } else { unreachable!(); } + } } - } - if let Some(updates) = raa_updates.commitment_update { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id.clone(), - updates, - }); - } - break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, - raa_updates.finalized_claimed_htlcs, - chan.get().get_short_channel_id() - .unwrap_or(chan.get().outbound_scid_alias()), - chan.get().get_funding_txo().unwrap())) - }, - hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + if let Some(updates) = raa_updates.commitment_update { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id.clone(), + updates, + }); + } + break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs, + chan.get().get_short_channel_id() + .unwrap_or(chan.get().outbound_scid_alias()), + chan.get().get_funding_txo().unwrap())) + }, + hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); @@ -4844,14 +5090,18 @@ impl ChannelMana fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4860,24 +5110,28 @@ impl ChannelMana let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - if !chan.get().is_usable() { - return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); - } + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + if !chan.get().is_usable() { + return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); + } - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( - self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan), - // Note that announcement_signatures fails if the channel cannot be announced, - // so get_channel_update_for_broadcast will never fail by the time we get here. - update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), - }); - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg: try_chan_entry!(self, chan.get_mut().announcement_signatures( + self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan), + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. + update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), + }); + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } Ok(()) } @@ -4886,82 +5140,91 @@ impl ChannelMana fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) { - Some(chan_id) => chan_id.clone(), + let (chan_counterparty_node_id, chan_id) = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) { + Some((cp_id, chan_id)) => (cp_id, chan_id.clone()), None => { // It's not a local channel return Ok(NotifyOption::SkipPersist) } }; - match channel_state.by_id.entry(chan_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - if chan.get().should_announce() { - // If the announcement is about a channel of ours which is public, some - // other peer may simply be forwarding all its gossip to us. Don't provide - // a scary-looking error message and return Ok instead. + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(chan_counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(chan_id) { + hash_map::Entry::Occupied(mut chan) => { + if chan.get().get_counterparty_node_id() != *counterparty_node_id { + if chan.get().should_announce() { + // If the announcement is about a channel of ours which is public, some + // other peer may simply be forwarding all its gossip to us. Don't provide + // a scary-looking error message and return Ok instead. + return Ok(NotifyOption::SkipPersist); + } + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); + } + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let msg_from_node_one = msg.contents.flags & 1 == 0; + if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); + } else { + try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); } - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); - } - let were_node_one = self.get_our_node_id().serialize()[..] 
< chan.get().get_counterparty_node_id().serialize()[..]; - let msg_from_node_one = msg.contents.flags & 1 == 0; - if were_node_one == msg_from_node_one { - return Ok(NotifyOption::SkipPersist); - } else { - try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); - } - }, - hash_map::Entry::Vacant(_) => unreachable!() + }, + hash_map::Entry::Vacant(_) => unreachable!() + } } Ok(NotifyOption::DoPersist) } - fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let chan_restoration_res; - let (htlcs_failed_forward, need_lnd_workaround) = { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - match channel_state.by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if chan.get().get_counterparty_node_id() != *counterparty_node_id { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); - } - // Currently, we expect all holding cell update_adds to be dropped on peer - // disconnect, so Channel's reestablish will never hand us any holding cell - // freed HTLCs to fail backwards. If in the future we no longer drop pending - // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. - let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( - msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, - &*self.best_block.read().unwrap()), channel_state, chan); - let mut channel_update = None; - if let Some(msg) = responses.shutdown_msg { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: counterparty_node_id.clone(), - msg, - }); - } else if chan.get().is_usable() { - // If the channel is in a usable state (ie the channel is not being shut - // down), send a unicast channel_update to our counterparty to make sure - // they have the latest channel parameters. - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - channel_update = Some(events::MessageSendEvent::SendChannelUpdate { - node_id: chan.get().get_counterparty_node_id(), + fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { + let chan_restoration_res; + let (htlcs_failed_forward, need_lnd_workaround) = { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(msg.channel_id) { + hash_map::Entry::Occupied(mut chan) => { + // Currently, we expect all holding cell update_adds to be dropped on peer + // disconnect, so Channel's reestablish will never hand us any holding cell + // freed HTLCs to fail backwards. If in the future we no longer drop pending + // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. 
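Every `internal_*` handler rewritten above now shares the same two-level lookup: take the outer `per_peer_state` read lock, then the counterparty's `Mutex<PeerState>`, then index that peer's `channel_by_id`. A minimal self-contained sketch of that shape, with placeholder types standing in for `PublicKey`, `Channel`, and the error plumbing (none of these names are LDK API):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type NodeId = [u8; 33];   // stand-in for a secp256k1 PublicKey
type ChannelId = [u8; 32];

struct Channel { /* per-channel state would live here */ }

struct PeerState {
    channel_by_id: HashMap<ChannelId, Channel>,
}

struct Manager {
    per_peer_state: RwLock<HashMap<NodeId, Mutex<PeerState>>>,
}

impl Manager {
    /// Mirrors the handlers' lookup order: outer RwLock read, then the
    /// per-peer Mutex, then the channel map. Failing at either level yields
    /// a distinct error, like the "Can't find a peer..." vs.
    /// "No such channel..." messages in the diff.
    fn with_channel<R>(
        &self, counterparty: &NodeId, channel_id: &ChannelId,
        f: impl FnOnce(&mut Channel) -> R,
    ) -> Result<R, String> {
        let per_peer_state = self.per_peer_state.read().unwrap();
        let peer_state_mutex = per_peer_state.get(counterparty)
            .ok_or_else(|| "no such peer".to_string())?;
        let mut peer_state = peer_state_mutex.lock().unwrap();
        let chan = peer_state.channel_by_id.get_mut(channel_id)
            .ok_or_else(|| "no such channel for this peer".to_string())?;
        Ok(f(chan))
    }
}

fn main() {
    let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
    // With no peers registered, the lookup fails at the first level.
    assert!(mgr.with_channel(&[2; 33], &[0; 32], |_c| ()).is_err());
}
```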
+ let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish( + msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash, + &*self.best_block.read().unwrap()), channel_state, chan); + let mut channel_update = None; + if let Some(msg) = responses.shutdown_msg { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: counterparty_node_id.clone(), msg, }); + } else if chan.get().is_usable() { + // If the channel is in a usable state (ie the channel is not being shut + // down), send a unicast channel_update to our counterparty to make sure + // they have the latest channel parameters. + if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { + channel_update = Some(events::MessageSendEvent::SendChannelUpdate { + node_id: chan.get().get_counterparty_node_id(), + msg, + }); + } } - } - let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); - chan_restoration_res = handle_chan_restoration_locked!( - self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order, - responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); - if let Some(upd) = channel_update { - channel_state.pending_msg_events.push(upd); - } - (responses.holding_cell_failed_htlcs, need_lnd_workaround) - }, - hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) + let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); + chan_restoration_res = handle_chan_restoration_locked!( + self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order, + responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + (responses.holding_cell_failed_htlcs, need_lnd_workaround) + }, + hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)); } }; post_handle_chan_restoration!(self, chan_restoration_res); @@ -4994,28 +5257,38 @@ impl ChannelMana MonitorEvent::UpdateFailed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; - let by_id = &mut channel_state.by_id; - let pending_msg_events = &mut channel_state.pending_msg_events; - if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) { - let mut chan = remove_channel!(self, channel_state, chan_entry); - failed_channels.push(chan.force_shutdown(false)); - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + let counterparty_node_id_opt = { + let id_to_peer = self.id_to_peer.lock().unwrap(); + id_to_peer.get(&funding_outpoint.to_channel_id()).cloned() + }; + if let Some(counterparty_node_id) = counterparty_node_id_opt { + let per_peer_state = self.per_peer_state.read().unwrap(); + let pending_msg_events = &mut channel_state.pending_msg_events; + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { + let mut chan = remove_channel!(self, channel_state, chan_entry); + failed_channels.push(chan.force_shutdown(false)); + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { + ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } + } else { + ClosureReason::CommitmentTxConfirmed + }; + self.issue_channel_close_events(&chan, reason); + pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: chan.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } + }, + }); + } } - let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { - ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } - } else { - ClosureReason::CommitmentTxConfirmed - }; - self.issue_channel_close_events(&chan, reason); - pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: chan.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() } - }, - }); } }, MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => { @@ -5055,43 +5328,46 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; - - 
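The monitor-event path above shows why the new `id_to_peer` map exists: a `ChannelMonitor` only knows its funding outpoint, so a bare `channel_id` must first be resolved to a counterparty before the per-peer channel map can be consulted. A toy sketch of that two-step resolution (the types here are stand-ins, not LDK's):

```rust
use std::collections::HashMap;

type NodeId = u8;      // stand-ins kept tiny on purpose
type ChannelId = u32;

fn resolve(
    id_to_peer: &HashMap<ChannelId, NodeId>,
    channels: &HashMap<NodeId, HashMap<ChannelId, &'static str>>,
    channel_id: ChannelId,
) -> Option<&'static str> {
    // Step 1: channel_id -> counterparty node id.
    let peer = id_to_peer.get(&channel_id)?;
    // Step 2: counterparty -> that peer's channel map -> channel.
    channels.get(peer)?.get(&channel_id).copied()
}

fn main() {
    let mut id_to_peer = HashMap::new();
    id_to_peer.insert(7, 1);
    let mut channels = HashMap::new();
    channels.insert(1, HashMap::from([(7, "channel state")]));
    assert_eq!(resolve(&id_to_peer, &channels, 7), Some("channel state"));
    assert_eq!(resolve(&id_to_peer, &channels, 8), None); // unknown channel_id
}
```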
by_id.retain(|channel_id, chan| { - match chan.maybe_free_holding_cell_htlcs(&self.logger) { - Ok((commitment_opt, holding_cell_failed_htlcs)) => { - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push(( - holding_cell_failed_htlcs, - *channel_id, - chan.get_counterparty_node_id() - )); - } - if let Some((commitment_update, monitor_update)) = commitment_opt { - if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); - handle_errors.push((chan.get_counterparty_node_id(), res)); - if close_channel { return false; } - } else { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: commitment_update, - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|channel_id, chan| { + match chan.maybe_free_holding_cell_htlcs(&self.logger) { + Ok((commitment_opt, holding_cell_failed_htlcs)) => { + if !holding_cell_failed_htlcs.is_empty() { + failed_htlcs.push(( + holding_cell_failed_htlcs, + *channel_id, + chan.get_counterparty_node_id() + )); } + if let Some((commitment_update, monitor_update)) = commitment_opt { + if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + has_monitor_update = true; + let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + handle_errors.push((chan.get_counterparty_node_id(), res)); + if close_channel { return false; } + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: commitment_update, + }); + } + } + true + }, + Err(e) => { + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us + !close_channel } - true - }, - Err(e) => { - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); - // ChannelClosed event is generated by handle_error for us - !close_channel } - } - }); + }); + } } let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); @@ -5115,44 +5391,48 @@ impl ChannelMana { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; - let by_id = &mut channel_state.by_id; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; let pending_msg_events = &mut channel_state.pending_msg_events; - by_id.retain(|channel_id, chan| { - match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { - Ok((msg_opt, tx_opt)) => { - if let Some(msg) = msg_opt { - has_update = true; - pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: chan.get_counterparty_node_id(), msg, - }); - } - if let Some(tx) = tx_opt { - // We're done with this channel. 
We got a closing_signed and sent back - // a closing_signed with a closing transaction to broadcast. - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, }); } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } - self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); - log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); - self.tx_broadcaster.broadcast_transaction(&tx); - update_maps_on_chan_removal!(self, short_to_id, chan); - false - } else { true } - }, - Err(e) => { - has_update = true; - let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); - handle_errors.push((chan.get_counterparty_node_id(), Err(res))); - !close_channel + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + update_maps_on_chan_removal!(self, short_to_chan_info, chan); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } } - } - }); + }); + } } for (counterparty_node_id, err) in handle_errors.drain(..) { @@ -5342,7 +5622,7 @@ impl ChannelMana loop { let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager); // Ensure the generated scid doesn't conflict with a real channel. 
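The SCID probe continued just below is plain rejection sampling: draw a candidate, retry while it collides with a known channel, and return the first vacant one. A sketch of the same `entry`-based loop with a stand-in generator (the real code derives candidates via `fake_scid::Namespace::Phantom`):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

/// Keep drawing candidates until one does not collide with an existing key.
/// `next_candidate` stands in for the keyed fake-SCID derivation.
fn pick_unused_scid(
    short_to_chan_info: &mut HashMap<u64, (u8, u32)>,
    mut next_candidate: impl FnMut() -> u64,
) -> u64 {
    loop {
        let candidate = next_candidate();
        match short_to_chan_info.entry(candidate) {
            Entry::Occupied(_) => continue, // collides with a real channel; redraw
            Entry::Vacant(_) => return candidate,
        }
    }
}

fn main() {
    let mut map = HashMap::from([(1u64, (0u8, 0u32)), (2, (0, 0))]);
    let mut n = 0u64;
    let scid = pick_unused_scid(&mut map, move || { n += 1; n });
    assert_eq!(scid, 3); // 1 and 2 collide, 3 is free
}
```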
- match channel_state.short_to_id.entry(scid_candidate) {
+ match channel_state.short_to_chan_info.entry(scid_candidate) {
 hash_map::Entry::Occupied(_) => continue,
 hash_map::Entry::Vacant(_) => return scid_candidate
 }
@@ -5577,10 +5857,14 @@ where
 fn get_relevant_txids(&self) -> Vec<Txid> {
 let channel_state = self.channel_state.lock().unwrap();
- let mut res = Vec::with_capacity(channel_state.short_to_id.len());
- for chan in channel_state.by_id.values() {
- if let Some(funding_txo) = chan.get_funding_txo() {
- res.push(funding_txo.txid);
+ let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len());
+ for (_, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ for chan in peer_state.channel_by_id.values() {
+ if let Some(funding_txo) = chan.get_funding_txo() {
+ res.push(funding_txo.txid);
+ }
 }
 }
 res
@@ -5620,84 +5904,90 @@ where
 {
 let mut channel_lock = self.channel_state.lock().unwrap();
 let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
+ let short_to_chan_info = &mut channel_state.short_to_chan_info;
 let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|_, channel| {
- let res = f(channel);
- if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
- for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
- let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
- timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
- failure_code, data,
- }));
- }
- if let Some(channel_ready) = channel_ready_opt {
- send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready);
- if channel.is_usable() {
- log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
- if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ peer_state.channel_by_id.retain(|_, channel| {
+ let res = f(channel);
+ if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+ for (source, payment_hash) in timed_out_pending_htlcs.drain(..)
{ + let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { + failure_code, data, + })); + } + if let Some(channel_ready) = channel_ready_opt { + send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); + if channel.is_usable() { + log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(channel) { + pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg, + }); + } + } else { + log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } - } else { - log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } - } - if let Some(announcement_sigs) = announcement_sigs { - log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); - pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { - node_id: channel.get_counterparty_node_id(), - msg: announcement_sigs, - }); - if let Some(height) = height_opt { - if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg: announcement, - // Note that announcement_signatures fails if the channel cannot be announced, - // so get_channel_update_for_broadcast will never fail by the time we get here. - update_msg: self.get_channel_update_for_broadcast(channel).unwrap(), - }); + if let Some(announcement_sigs) = announcement_sigs { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); + pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + node_id: channel.get_counterparty_node_id(), + msg: announcement_sigs, + }); + if let Some(height) = height_opt { + if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + msg: announcement, + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. + update_msg: self.get_channel_update_for_broadcast(channel).unwrap(), + }); + } } } - } - if channel.is_our_channel_ready() { - if let Some(real_scid) = channel.get_short_channel_id() { - // If we sent a 0conf channel_ready, and now have an SCID, we add it - // to the short_to_id map here. Note that we check whether we can relay - // using the real SCID at relay-time (i.e. enforce option_scid_alias - // then), and if the funding tx is ever un-confirmed we force-close the - // channel, ensuring short_to_id is always consistent. 
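The replacement lines below lean on `HashMap::insert` returning the key's previous value: inserting a real SCID mapping is asserted to be either fresh or idempotent, and anything else is the "SCIDs should never collide" failure. A tiny illustration of that idempotent-insert check:

```rust
use std::collections::HashMap;

fn main() {
    let mut short_to_chan_info: HashMap<u64, (u8, u32)> = HashMap::new();

    // First insert: no previous entry, so `insert` returns None.
    let prev = short_to_chan_info.insert(42, (1, 7));
    assert!(prev.is_none());

    // Re-inserting the same mapping is fine: the returned previous value
    // equals what we are writing, so the consistency assert still holds.
    let prev = short_to_chan_info.insert(42, (1, 7));
    assert!(prev.is_none() || prev.unwrap() == (1, 7));

    // A different channel claiming the same SCID would trip the assert,
    // which is exactly the collision condition guarded against here.
    let prev = short_to_chan_info.insert(42, (2, 9));
    assert_eq!(prev, Some((1, 7))); // collision detected via the return value
}
```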
- let scid_insert = short_to_id.insert(real_scid, channel.channel_id()); - assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(), - "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", - fake_scid::MAX_SCID_BLOCKS_FROM_NOW); + if channel.is_our_channel_ready() { + if let Some(real_scid) = channel.get_short_channel_id() { + // If we sent a 0conf channel_ready, and now have an SCID, we add it + // to the short_to_chan_info map here. Note that we check whether we + // can relay using the real SCID at relay-time (i.e. + // enforce option_scid_alias then), and if the funding tx is ever + // un-confirmed we force-close the channel, ensuring short_to_chan_info + // is always consistent. + let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id())); + assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()), + "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels", + fake_scid::MAX_SCID_BLOCKS_FROM_NOW); + } } - } - } else if let Err(reason) = res { - update_maps_on_chan_removal!(self, short_to_id, channel); - // It looks like our counterparty went on-chain or funding transaction was - // reorged out of the main chain. Close the channel. - failed_channels.push(channel.force_shutdown(true)); - if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update + } else if let Err(reason) = res { + update_maps_on_chan_removal!(self, short_to_chan_info, channel); + // It looks like our counterparty went on-chain or funding transaction was + // reorged out of the main chain. Close the channel. + failed_channels.push(channel.force_shutdown(true)); + if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + let reason_message = format!("{}", reason); + self.issue_channel_close_events(channel, reason); + pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: channel.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { + channel_id: channel.channel_id(), + data: reason_message, + } }, }); + return false; } - let reason_message = format!("{}", reason); - self.issue_channel_close_events(channel, reason); - pending_msg_events.push(events::MessageSendEvent::HandleError { - node_id: channel.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { - channel_id: channel.channel_id(), - data: reason_message, - } }, - }); - return false; - } - true - }); + true + }); + } if let Some(height) = height_opt { channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| { @@ -5867,22 +6157,27 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - let short_to_id = &mut channel_state.short_to_id; + let short_to_chan_info = &mut channel_state.short_to_chan_info; log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. 
We believe we {} make future connections to this peer.", log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" }); - channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { - chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); - if chan.is_shutdown() { - update_maps_on_chan_removal!(self, short_to_id, chan); - self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); - return false; - } else { - no_channels_remain = false; + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|_, chan| { + if chan.get_counterparty_node_id() == *counterparty_node_id { + chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); + if chan.is_shutdown() { + update_maps_on_chan_removal!(self, short_to_chan_info, chan); + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); + return false; + } else { + no_channels_remain = false; + } } - } - true - }); + true + }); + } pending_msg_events.retain(|msg| { match msg { &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id, @@ -5927,6 +6222,7 @@ impl match peer_state_lock.entry(counterparty_node_id.clone()) { hash_map::Entry::Vacant(e) => { e.insert(Mutex::new(PeerState { + channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), })); }, @@ -5939,23 +6235,28 @@ impl let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; - channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { - if !chan.have_received_message() { - // If we created this (outbound) channel while we were disconnected from the - // peer we probably failed to send the open_channel message, which is now - // lost. We can't have had anything pending related to this channel, so we just - // drop it. - false - } else { - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.get_counterparty_node_id(), - msg: chan.get_channel_reestablish(&self.logger), - }); - true - } - } else { true } - }); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + peer_state.channel_by_id.retain(|_, chan| { + if chan.get_counterparty_node_id() == *counterparty_node_id { + if !chan.have_received_message() { + // If we created this (outbound) channel while we were disconnected from the + // peer we probably failed to send the open_channel message, which is now + // lost. We can't have had anything pending related to this channel, so we just + // drop it. + false + } else { + pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { + node_id: chan.get_counterparty_node_id(), + msg: chan.get_channel_reestablish(&self.logger), + }); + true + } + } else { true } + }); + } //TODO: Also re-broadcast announcement_signatures } @@ -5973,17 +6274,19 @@ impl { // First check if we can advance the channel type and try again. 
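`peer_connected` above seeds the map through the vacant arm of `HashMap::entry`, so a fresh peer always starts with an empty `channel_by_id`. A compact sketch of that connect-time initialization (field names mirror the diff; the types are placeholders, and the occupied-arm behaviour is an assumption for illustration):

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Mutex;

struct PeerState {
    channel_by_id: HashMap<[u8; 32], String>, // String stands in for Channel
    latest_features: u64,                     // stand-in for InitFeatures
}

fn on_peer_connected(
    per_peer_state: &mut HashMap<[u8; 33], Mutex<PeerState>>,
    counterparty_node_id: [u8; 33],
    features: u64,
) {
    match per_peer_state.entry(counterparty_node_id) {
        // First connection from this peer: it starts with no channels.
        Entry::Vacant(e) => {
            e.insert(Mutex::new(PeerState {
                channel_by_id: HashMap::new(),
                latest_features: features,
            }));
        }
        // Reconnection: keep the existing channels, refresh the features
        // (an assumed behaviour for this sketch).
        Entry::Occupied(e) => {
            e.into_mut().lock().unwrap().latest_features = features;
        }
    }
}

fn main() {
    let mut peers = HashMap::new();
    on_peer_connected(&mut peers, [3; 33], 1);
    on_peer_connected(&mut peers, [3; 33], 2);
    let peer = peers[&[3; 33]].lock().unwrap();
    assert!(peer.channel_by_id.is_empty());
    assert_eq!(peer.latest_features, 2);
}
```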
let mut channel_state = self.channel_state.lock().unwrap(); - if let Some(chan) = channel_state.by_id.get_mut(&msg.channel_id) { - if chan.get_counterparty_node_id() != *counterparty_node_id { - return; - } - if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: *counterparty_node_id, - msg, - }); - return; - } + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) { + if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + node_id: *counterparty_node_id, + msg, + }); + return; + } + } else { return; } } } @@ -6422,45 +6725,60 @@ impl Writeable f } let channel_state = self.channel_state.lock().unwrap(); - let mut unfunded_channels = 0; - for (_, channel) in channel_state.by_id.iter() { - if !channel.is_funding_initiated() { - unfunded_channels += 1; + let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut unfunded_channels = 0; + let mut number_of_channels = 0; + for (_, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + number_of_channels += peer_state.channel_by_id.len(); + for (_, channel) in peer_state.channel_by_id.iter() { + if !channel.is_funding_initiated() { + unfunded_channels += 1; + } + } } - } - ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?; - for (_, channel) in channel_state.by_id.iter() { - if channel.is_funding_initiated() { - channel.write(writer)?; + + ((number_of_channels - unfunded_channels) as u64).write(writer)?; + + for (_, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (_, channel) in peer_state.channel_by_id.iter() { + if channel.is_funding_initiated() { + channel.write(writer)?; + } + } } - } - (channel_state.forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; + (channel_state.forward_htlcs.len() as u64).write(writer)?; + for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { + short_channel_id.write(writer)?; + (pending_forwards.len() as u64).write(writer)?; + for forward in pending_forwards { + forward.write(writer)?; + } } - } - let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); - (channel_state.claimable_htlcs.len() as u64).write(writer)?; - for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { - payment_hash.write(writer)?; - (previous_hops.len() as u64).write(writer)?; - for htlc in previous_hops.iter() { - htlc.write(writer)?; + (channel_state.claimable_htlcs.len() as u64).write(writer)?; + for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { + payment_hash.write(writer)?; + (previous_hops.len() as u64).write(writer)?; + for htlc in 
previous_hops.iter() {
+ htlc.write(writer)?;
+ }
+ htlc_purposes.push(purpose);
- }
- let per_peer_state = self.per_peer_state.write().unwrap();
- (per_peer_state.len() as u64).write(writer)?;
- for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
- peer_pubkey.write(writer)?;
- let peer_state = peer_state_mutex.lock().unwrap();
- peer_state.latest_features.write(writer)?;
+ (per_peer_state.len() as u64).write(writer)?;
+ for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
+ peer_pubkey.write(writer)?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ peer_state.latest_features.write(writer)?;
+ }
 }

 let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
@@ -6672,8 +6990,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 let channel_count: u64 = Readable::read(reader)?;
 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 let mut channel_closures = Vec::new();
 for _ in 0..channel_count {
 let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
@@ -6713,9 +7032,20 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 } else {
 log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
 if let Some(short_channel_id) = channel.get_short_channel_id() {
- short_to_id.insert(short_channel_id, channel.channel_id());
+ short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+ }
+ id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+ match peer_channels.entry(channel.get_counterparty_node_id()) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let by_id_map = entry.get_mut();
+ by_id_map.insert(channel.channel_id(), channel);
+ },
+ hash_map::Entry::Vacant(entry) => {
+ let mut by_id_map = HashMap::new();
+ by_id_map.insert(channel.channel_id(), channel);
+ entry.insert(by_id_map);
+ }
 }
- by_id.insert(channel.channel_id(), channel);
 }
 } else {
 log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
@@ -6760,10 +7090,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 }

 let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState>)>()));
+ let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<Signer>>)>()));
 for _ in 0..peer_count {
 let peer_pubkey = Readable::read(reader)?;
 let peer_state = PeerState {
+ channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
 latest_features: Readable::read(reader)?,
 };
 per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -6856,7 +7187,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 // We only rebuild the pending payments map if we were most
recently serialized by // 0.0.102+ for (_, monitor) in args.channel_monitors.iter() { - if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { + if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() { for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() { if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source { if path.is_empty() { @@ -6956,28 +7287,32 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let mut outbound_scid_aliases = HashSet::new(); - for (chan_id, chan) in by_id.iter_mut() { - if chan.outbound_scid_alias() == 0 { - let mut outbound_scid_alias; - loop { - outbound_scid_alias = fake_scid::Namespace::OutboundAlias - .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager); - if outbound_scid_aliases.insert(outbound_scid_alias) { break; } - } - chan.set_outbound_scid_alias(outbound_scid_alias); - } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { - // Note that in rare cases its possible to hit this while reading an older - // channel if we just happened to pick a colliding outbound alias above. - log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); - return Err(DecodeError::InvalidValue); - } - if chan.is_usable() { - if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() { + for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + for (chan_id, chan) in peer_state.channel_by_id.iter_mut() { + if chan.outbound_scid_alias() == 0 { + let mut outbound_scid_alias; + loop { + outbound_scid_alias = fake_scid::Namespace::OutboundAlias + .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager); + if outbound_scid_aliases.insert(outbound_scid_alias) { break; } + } + chan.set_outbound_scid_alias(outbound_scid_alias); + } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } + if chan.is_usable() { + if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { + // Note that in rare cases its possible to hit this while reading an older + // channel if we just happened to pick a colliding outbound alias above. + log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); + return Err(DecodeError::InvalidValue); + } + } } } @@ -7005,8 +7340,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> // without the new monitor persisted - we'll end up right back here on // restart. 
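Stepping back to how `peer_channels` is populated during deserialization above: channels are read as one flat sequence and bucketed under their counterparty, which the diff spells out with explicit occupied/vacant arms. The same grouping written with `entry().or_insert_with` over toy types:

```rust
use std::collections::HashMap;

type NodeId = u8;
type ChannelId = u32;

#[derive(Debug, PartialEq)]
struct Channel { counterparty: NodeId, id: ChannelId }

/// Bucket a flat list of channels under their counterparty node id,
/// mirroring how `peer_channels` is built while reading channels back.
fn group_by_peer(channels: Vec<Channel>) -> HashMap<NodeId, HashMap<ChannelId, Channel>> {
    let mut peer_channels: HashMap<NodeId, HashMap<ChannelId, Channel>> = HashMap::new();
    for channel in channels {
        peer_channels
            .entry(channel.counterparty)
            .or_insert_with(HashMap::new)
            .insert(channel.id, channel);
    }
    peer_channels
}

fn main() {
    let grouped = group_by_peer(vec![
        Channel { counterparty: 1, id: 10 },
        Channel { counterparty: 1, id: 11 },
        Channel { counterparty: 2, id: 20 },
    ]);
    assert_eq!(grouped[&1].len(), 2);
    assert_eq!(grouped[&2][&20], Channel { counterparty: 2, id: 20 });
}
```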
let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
- if let Some(channel) = by_id.get_mut(&previous_channel_id) {
- channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+ if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){
+ let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+ channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+ }
 }
 if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
 previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger);
@@ -7030,8 +7370,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),

 channel_state: Mutex::new(ChannelHolder {
- by_id,
- short_to_id,
+ short_to_chan_info,
 forward_htlcs,
 claimable_htlcs,
 pending_msg_events: Vec::new(),
@@ -7041,6 +7380,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),

 outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
+ id_to_peer: Mutex::new(id_to_peer),
 fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),

 our_network_key,
@@ -7050,7 +7390,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
 highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),

- per_peer_state: RwLock::new(per_peer_state),
+ per_peer_state: FairRwLock::new(per_peer_state),

 pending_events: Mutex::new(pending_events_read),
 pending_background_events: Mutex::new(pending_background_events_read),
@@ -7077,6 +7417,12 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 mod tests {
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
+ use bitcoin::hashes::hex::FromHex;
+ use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+ use bitcoin::secp256k1::ecdsa::Signature;
+ use bitcoin::secp256k1::ffi::Signature as FFISignature;
+ use bitcoin::blockdata::script::Script;
+ use bitcoin::Txid;
 use core::time::Duration;
 use core::sync::atomic::Ordering;
 use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
@@ -7085,7 +7431,7 @@ mod tests {
 use ln::features::InitFeatures;
 use ln::functional_test_utils::*;
 use ln::msgs;
- use ln::msgs::ChannelMessageHandler;
+ use ln::msgs::{ChannelMessageHandler, OptionalField};
 use routing::router::{PaymentParameters, RouteParameters, find_route};
 use util::errors::APIError;
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
@@ -7579,6 +7925,324 @@ mod tests {
 // Check that using the original payment hash succeeds.
 assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
 }
+
+ #[test]
+ fn test_id_to_peer_coverage() {
+ // Test that the `ChannelManager::id_to_peer` contains channels which have been assigned
+ // a `channel_id` (i.e.
have had the funding tx created), and that they are removed once
+ // the channel is successfully closed.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+ let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let channel_id = &tx.txid().into_inner();
+ {
+ // Ensure that the `id_to_peer` map is empty until either party has received the
+ // funding transaction and has the real `channel_id`.
+ assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ }
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ {
+ // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon
+ // as it has the funding transaction.
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ }
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ {
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon
+ // as it has the funding transaction.
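The test can treat `tx.txid().into_inner()` as the `channel_id` only because the funding output index is 0: per BOLT 2, the channel id is the funding txid's bytes with the output index XORed into the last two bytes (big-endian). A small sketch of that derivation over plain arrays, mirroring what LDK's `OutPoint::to_channel_id` computes:

```rust
/// Derive a BOLT 2 channel_id: the funding txid bytes with the funding
/// output index XORed into the last two bytes (big-endian).
fn channel_id_from_funding(txid: [u8; 32], output_index: u16) -> [u8; 32] {
    let mut id = txid;
    id[30] ^= (output_index >> 8) as u8;
    id[31] ^= (output_index & 0xff) as u8;
    id
}

fn main() {
    let txid = [0xab; 32];
    // Index 0 leaves the txid bytes untouched, which is why the test can
    // compare against `tx.txid().into_inner()` directly.
    assert_eq!(channel_id_from_funding(txid, 0), txid);
    // A non-zero index only perturbs the last two bytes.
    let id = channel_id_from_funding(txid, 1);
    assert_eq!(id[31], 0xab ^ 0x01);
    assert_eq!(&id[..30], &txid[..30]);
}
```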
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_1_lock.len(), 1);
+ assert!(nodes_1_lock.contains_key(channel_id));
+ }
+ check_added_monitors!(nodes[1], 1);
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+ check_added_monitors!(nodes[0], 1);
+ let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
+
+ nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+ let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &nodes_1_shutdown);
+
+ let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
+ {
+ // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+ // channel can be fully closed by both parties (i.e. no outstanding htlcs exist, the
+ // fee for the closing transaction has been negotiated and each party has the other
+ // party's signature for the fee-negotiated closing transaction.)
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
+ // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
+ // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
+ // kept in `nodes[1]`'s `id_to_peer` map.
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_1_lock.len(), 1);
+ assert!(nodes_1_lock.contains_key(channel_id));
+ }
+
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+ {
+ // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
+ // therefore has all it needs to fully close the channel (both signatures for the
+ // closing transaction).
+ // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+ // fully closed by `nodes[0]`.
+ assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+
+ // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+ // doesn't have `nodes[0]`'s signature for the closing transaction yet.
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + assert_eq!(nodes_1_lock.len(), 1); + assert!(nodes_1_lock.contains_key(channel_id)); + } + + let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap()); + { + // Assert that the channel is removed from both parties' `id_to_peer` maps once they both + // have everything required to fully close the channel. + assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + } + let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + + // Clear the `ChannelClosed` event for the nodes. + nodes[0].node.get_and_clear_pending_events(); + nodes[1].node.get_and_clear_pending_events(); + } + + fn check_unknown_peer_msg_event<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>, closing_node_id: PublicKey, closed_channel_id: [u8; 32]) { + let close_msg_ev = node.node.get_and_clear_pending_msg_events(); + let expected_error_str = format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", closing_node_id); + match close_msg_ev[0] { + MessageSendEvent::HandleError { + ref node_id, action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { ref channel_id, ref data } + } + } => { + assert_eq!(*node_id, closing_node_id); + assert_eq!(*data, expected_error_str); + assert_eq!(*channel_id, closed_channel_id); + } + _ => panic!("Unexpected event"), + } + } + + fn check_unknown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) { + match res_err { + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", expected_public_key)); + }, + Ok(_) => panic!("Unexpected Ok"), + Err(_) => panic!("Unexpected Error"), + } + } + + #[test] + fn test_api_calls_with_unknown_counterparty_node() { + // Tests that our API functions and message handlers that expect a `counterparty_node_id` + // as input behave as expected if the `counterparty_node_id` is an unknown peer in the + // `ChannelManager::per_peer_state` map. + let chanmon_cfg = create_chanmon_cfgs(2); + let node_cfg = create_node_cfgs(2, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); + let nodes = create_network(2, &node_cfg, &node_chanmgr); + + // Boilerplate code to produce `open_channel` and `accept_channel` msgs more densely than + // creating dummy ones.
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg); + let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + + // Dummy values + let channel_id = [4; 32]; + let signature = Signature::from(unsafe { FFISignature::new() }); + let unknown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap()); + + // Dummy msgs + let funding_created_msg = msgs::FundingCreated { + temporary_channel_id: open_channel_msg.temporary_channel_id, + funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), + funding_output_index: 0, + signature: signature, + }; + + let funding_signed_msg = msgs::FundingSigned { + channel_id: channel_id, + signature: signature, + }; + + let channel_ready_msg = msgs::ChannelReady { + channel_id: channel_id, + next_per_commitment_point: unknown_public_key, + short_channel_id_alias: None, + }; + + let announcement_signatures_msg = msgs::AnnouncementSignatures { + channel_id: channel_id, + short_channel_id: 0, + node_signature: signature, + bitcoin_signature: signature, + }; + + let channel_reestablish_msg = msgs::ChannelReestablish { + channel_id: channel_id, + next_local_commitment_number: 0, + next_remote_commitment_number: 0, + data_loss_protect: OptionalField::Absent, + }; + + let closing_signed_msg = msgs::ClosingSigned { + channel_id: channel_id, + fee_satoshis: 1000, + signature: signature, + fee_range: None, + }; + + let shutdown_msg = msgs::Shutdown { + channel_id: channel_id, + scriptpubkey: Script::new(), + }; + + let onion_routing_packet = msgs::OnionPacket { + version: 255, + public_key: Ok(unknown_public_key), + hop_data: [1; 20*65], + hmac: [2; 32] + }; + + let update_add_htlc_msg = msgs::UpdateAddHTLC { + channel_id: channel_id, + htlc_id: 0, + amount_msat: 1000000, + payment_hash: PaymentHash([1; 32]), + cltv_expiry: 821716, + onion_routing_packet + }; + + let commitment_signed_msg = msgs::CommitmentSigned { + channel_id: channel_id, + signature: signature, + htlc_signatures: Vec::new(), + }; + + let update_fee_msg = msgs::UpdateFee { + channel_id: channel_id, + feerate_per_kw: 1000, + }; + + let malformed_update_msg = msgs::UpdateFailMalformedHTLC { + channel_id: channel_id, + htlc_id: 0, + sha256_of_onion: [1; 32], + failure_code: 0x8000, + }; + + let fulfill_update_msg = msgs::UpdateFulfillHTLC { + channel_id: channel_id, + htlc_id: 0, + payment_preimage: PaymentPreimage([1; 32]), + }; + + let fail_update_msg = msgs::UpdateFailHTLC { + channel_id: channel_id, + htlc_id: 0, + reason: msgs::OnionErrorPacket { data: Vec::new() }, + }; + + let revoke_and_ack_msg = msgs::RevokeAndACK { + channel_id: channel_id, + per_commitment_secret: [1; 32], + next_per_commitment_point: unknown_public_key, + }; + + // Test the API functions and message handlers.
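+ // Editor's sketch, not part of the original patch: every handler and API call exercised below + // starts by looking the counterparty up in `per_peer_state`; for a key that was never added, + // that lookup comes up empty, which is what produces the errors asserted on below. A minimal + // illustration: + { + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + assert!(per_peer_state.get(&unknown_public_key).is_none()); + } +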
+ check_unknown_peer_error(nodes[0].node.create_channel(unknown_public_key, 1_000_000, 500_000_000, 42, None), unknown_public_key); + + nodes[1].node.handle_open_channel(&unknown_public_key, InitFeatures::known(), &open_channel_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, open_channel_msg.temporary_channel_id); + + nodes[0].node.handle_accept_channel(&unknown_public_key, InitFeatures::known(), &accept_channel_msg); + check_unknown_peer_msg_event(&nodes[0], unknown_public_key, open_channel_msg.temporary_channel_id); + + check_unknown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unknown_public_key, 42), unknown_public_key); + nodes[1].node.handle_funding_created(&unknown_public_key, &funding_created_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, open_channel_msg.temporary_channel_id); + + nodes[0].node.handle_funding_signed(&unknown_public_key, &funding_signed_msg); + check_unknown_peer_msg_event(&nodes[0], unknown_public_key, channel_id); + + nodes[0].node.handle_channel_ready(&unknown_public_key, &channel_ready_msg); + check_unknown_peer_msg_event(&nodes[0], unknown_public_key, channel_id); + + nodes[1].node.handle_announcement_signatures(&unknown_public_key, &announcement_signatures_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + check_unknown_peer_error(nodes[0].node.close_channel(&channel_id, &unknown_public_key), unknown_public_key); + + check_unknown_peer_error(nodes[0].node.force_close_channel(&channel_id, &unknown_public_key), unknown_public_key); + + nodes[0].node.handle_shutdown(&unknown_public_key, &InitFeatures::known(), &shutdown_msg); + check_unknown_peer_msg_event(&nodes[0], unknown_public_key, channel_id); + + nodes[1].node.handle_closing_signed(&unknown_public_key, &closing_signed_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[0].node.handle_channel_reestablish(&unknown_public_key, &channel_reestablish_msg); + check_unknown_peer_msg_event(&nodes[0], unknown_public_key, channel_id); + + nodes[1].node.handle_update_add_htlc(&unknown_public_key, &update_add_htlc_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_commitment_signed(&unknown_public_key, &commitment_signed_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_update_fail_malformed_htlc(&unknown_public_key, &malformed_update_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_update_fail_htlc(&unknown_public_key, &fail_update_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_update_fulfill_htlc(&unknown_public_key, &fulfill_update_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_revoke_and_ack(&unknown_public_key, &revoke_and_ack_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + + nodes[1].node.handle_update_fee(&unknown_public_key, &update_fee_msg); + check_unknown_peer_msg_event(&nodes[1], unknown_public_key, channel_id); + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 38c05998e6a..25feeddeaf2 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -510,20 +510,22 @@ macro_rules!
get_htlc_update_msgs { #[cfg(test)] macro_rules! get_channel_ref { - ($node: expr, $lock: ident, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => { { - $lock = $node.node.channel_state.lock().unwrap(); - $lock.by_id.get_mut(&$channel_id).unwrap() + $per_peer_state_lock = $node.node.per_peer_state.write().unwrap(); + $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + $peer_state_lock.channel_by_id.get_mut(&$channel_id).unwrap() } } } #[cfg(test)] macro_rules! get_feerate { - ($node: expr, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let mut lock; - let chan = get_channel_ref!($node, lock, $channel_id); + let mut per_peer_state_lock; + let mut peer_state_lock; + let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); chan.get_feerate() } } @@ -531,10 +533,11 @@ macro_rules! get_feerate { #[cfg(test)] macro_rules! get_opt_anchors { - ($node: expr, $channel_id: expr) => { + ($node: expr, $counterparty_node: expr, $channel_id: expr) => { { - let mut lock; - let chan = get_channel_ref!($node, lock, $channel_id); + let mut per_peer_state_lock; + let mut peer_state_lock; + let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id); chan.opt_anchors() } } @@ -1558,12 +1561,18 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, payment_id } -pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option) { +pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, clear_recipient_events: bool, expected_preimage: Option, expected_channels: Option<&Vec<[u8; 32]>>) { let mut payment_event = SendEvent::from_event(ev); let mut prev_node = origin_node; + if let Some(channel_ids) = expected_channels { + assert_eq!(expected_path.len(), channel_ids.len()); + } for (idx, &node) in expected_path.iter().enumerate() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); + if let Some(channel_ids) = expected_channels { + assert_eq!(payment_event.msgs[0].channel_id, channel_ids[idx]); + } node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(node, 0); @@ -1608,7 +1617,7 @@ pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_p } pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option, ev: MessageSendEvent, payment_received_expected: bool, expected_preimage: Option) { - do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage); + do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_received_expected, true, expected_preimage, None); } pub fn pass_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: 
&[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) { @@ -1689,8 +1698,8 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, ($node: expr, $prev_node: expr, $next_node: expr, $new_msgs: expr) => { { $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); - let fee = $node.node.channel_state.lock().unwrap() - .by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap() + let fee = $node.node.per_peer_state.read().unwrap().get(&$prev_node.node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap() .config.options.forwarding_fee_base_msat; expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false); expected_total_fee_msat += fee as u64; @@ -2181,9 +2190,10 @@ pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec {{ - let chan_lock = $node.node.channel_state.lock().unwrap(); - let chan = chan_lock.by_id.get(&$channel_id).unwrap(); + ($node: expr, $counterparty_node: expr, $channel_id: expr) => {{ + let peer_state_lock = $node.node.per_peer_state.read().unwrap(); + let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap(); + let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap(); chan.get_value_stat() }} } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 76487ddcfd1..1a89186ad4b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -169,8 +169,11 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { } nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message); { - let mut lock; - let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id); + let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; + let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; + let mut sender_node_per_peer_lock; + let mut sender_node_peer_state_lock; + let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); chan.holder_selected_channel_reserve_satoshis = 0; chan.holder_max_htlc_value_in_flight_msat = 100_000_000; } @@ -678,16 +681,18 @@ fn test_update_fee_that_funder_cannot_afford() { // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
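+ // (Editor's note, not part of the original patch: the hunks below all repeat the same new + // locking pattern -- take the `per_peer_state` read lock, then the counterparty's `PeerState` + // mutex, then index `channel_by_id` -- in place of the old single `channel_state` lock.)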
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -700,8 +705,9 @@ fn test_update_fee_that_funder_cannot_afford() { &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![]; let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( @@ -946,8 +952,8 @@ fn test_update_fee() { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30); - assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); @@ -1094,6 +1100,63 @@ fn fake_network_test() { check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); } +#[test] +fn test_forward_through_substitute_channel() { + // Test that we forward htlcs over substitute channels if we receive an onion payload that + // specifies that we should forward an htlc over a channel where we have insufficient + // liquidity, but an alternative channel to the same node with enough liquidity exists. 
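+ // (Editor's note, not part of the original patch: with channels stored per-peer, the SCID in + // the onion payload effectively selects the next peer rather than pinning one specific + // channel, so the forwarding node may pick any channel to that peer with sufficient + // liquidity; the `expected_channels` argument added to `do_pass_along_path` is what lets this + // test observe which channel actually carried the HTLC.)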
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known()); + let chan_1_2a = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10000, 1001, InitFeatures::known(), InitFeatures::known()); + let chan_1_2b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 100001, InitFeatures::known(), InitFeatures::known()); + + let mut hops = Vec::with_capacity(2); + hops.push(RouteHop { + pubkey: nodes[1].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_0_1.0.contents.short_channel_id, + channel_features: ChannelFeatures::known(), + fee_msat: 1000, + cltv_expiry_delta: chan_1_2a.0.contents.cltv_expiry_delta as u32 + }); + hops.push(RouteHop { + pubkey: nodes[2].node.get_our_node_id(), + node_features: NodeFeatures::known(), + short_channel_id: chan_1_2a.0.contents.short_channel_id, + channel_features: ChannelFeatures::known(), + fee_msat: 2000000, + cltv_expiry_delta: TEST_FINAL_CLTV + }); + + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2], Some(2000000)); + // Explicitly specify that `chan_1_2a` is the last hop in the `route`, which doesn't have + // enough liquidity to forward 2000000 msat from `nodes[1]` to `nodes[2]`. + let route = Route { paths: vec![hops], payment_params: None }; + + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(send_events.len(), 1); + + // Ensure that `chan_1_2b` is used instead of `chan_1_2a` when the htlc is actually forwarded. + let expected_channels = vec![chan_0_1.2, chan_1_2b.2]; + do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 2000000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None, Some(&expected_channels)); + + expect_payment_received!(nodes[2], payment_hash, payment_secret, 2000000); + + nodes[2].node.claim_funds(payment_preimage); + check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash, 2000000); + + // Clear `nodes[2]`'s pending `UpdateFulfillHTLC` msg, as handling it isn't relevant for this + // test. + nodes[2].node.get_and_clear_pending_msg_events(); +} + #[test] fn holding_cell_htlc_counting() { // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs @@ -1351,11 +1414,11 @@ fn test_basic_channel_reserve() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve.
- let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1, get_opt_anchors!(nodes[0], chan.2)); + let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2)); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send + 1); let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap(); @@ -1408,15 +1471,16 @@ fn test_fee_spike_violation_fails_htlc() { // nodes[0] just sent. In the code for construction of this message, "local" refers // to the sender of the message, and "remote" refers to the receiver. - let feerate_per_kw = get_feerate!(nodes[0], chan.2); + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret chan_signer.get_enforcement_state().last_holder_commitment -= 1; @@ -1428,8 +1492,9 @@ fn test_fee_spike_violation_fails_htlc() { chan_signer.pubkeys().funding_pubkey) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = { - let chan_lock = nodes[1].node.channel_state.lock().unwrap(); - let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap(); let chan_signer = remote_chan.get_signer(); let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, @@ -1456,8 +1521,9 @@ fn test_fee_spike_violation_fails_htlc() { let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; let res = { - let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, @@ -1706,9 +1772,9 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let feerate = get_feerate!(nodes[0], 
nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // Add a 2* and +1 for the fee spike reserve. let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors); @@ -1796,11 +1862,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known()); let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, InitFeatures::known(), InitFeatures::known()); - let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2); - let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2); + let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2); - let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2); + let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); + let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); macro_rules! expect_forward { ($node: expr) => {{ @@ -1814,8 +1880,8 @@ fn test_channel_reserve_holding_cell_htlcs() { let feemsat = 239; // set above let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let feerate = get_feerate!(nodes[0], chan_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2); let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; @@ -1845,10 +1911,10 @@ fn test_channel_reserve_holding_cell_htlcs() { send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0); let (stat01_, stat11_, stat12_, stat22_) = ( - get_channel_value_stat!(nodes[0], chan_1.2), - get_channel_value_stat!(nodes[1], chan_1.2), - get_channel_value_stat!(nodes[1], chan_2.2), - get_channel_value_stat!(nodes[2], chan_2.2), + get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), + get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), ); assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); @@ -1899,7 +1965,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; { - let stat = get_channel_value_stat!(nodes[0], chan_1.2); + let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat); } @@ -2011,11 +2077,11 @@ fn test_channel_reserve_holding_cell_htlcs() { let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); - let stat0 = get_channel_value_stat!(nodes[0], chan_1.2); + let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); - let stat2 = get_channel_value_stat!(nodes[2], 
chan_2.2); + let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); } @@ -2049,7 +2115,7 @@ fn channel_reserve_in_flight_removes() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2); + let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); // Route the first two HTLCs. let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); @@ -3098,7 +3164,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let value = if use_dust { // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. - nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 + nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -5503,7 +5570,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -6187,10 +6255,10 @@ fn test_fail_holding_cell_htlc_upon_free() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -6198,7 +6266,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Send a payment which passes reserve checks but gets stuck in the holding cell. let our_payment_id = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. 
@@ -6211,7 +6279,7 @@ fn test_fail_holding_cell_htlc_upon_free() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell // HTLC, but now that the fee has been raised the payment will now fail, causing // us to surface its failure to the user. - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -6267,10 +6335,10 @@ fn test_free_and_fail_holding_cell_htlcs() { nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let amt_1 = 20000; @@ -6280,10 +6348,10 @@ fn test_free_and_fail_holding_cell_htlcs() { // Send 2 payments which pass reserve checks but get stuck in the holding cell. nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); let payment_id_2 = nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); // Flush the pending fee update. @@ -6297,7 +6365,7 @@ fn test_free_and_fail_holding_cell_htlcs() { // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, // but now that the fee has been raised the second payment will now fail, causing us // to surface its failure to the user. The first payment should succeed. 
- chan_stat = get_channel_value_stat!(nodes[0], chan.2); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", @@ -6395,10 +6463,10 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap()); - let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan_0_1.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan_0_1.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. let feemsat = 239; @@ -6419,7 +6487,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2); + chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. @@ -6582,7 +6650,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known()); - let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; + let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64; for i in 0..max_accepted_htlcs { let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6623,7 +6692,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_value = 100000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, InitFeatures::known(), InitFeatures::known()); - let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat; + let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); @@ -6651,8 +6720,9 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); let htlc_minimum_msat: u64; { - let chan_lock = nodes[0].node.channel_state.lock().unwrap(); - let channel = chan_lock.by_id.get(&chan.2).unwrap(); + 
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); htlc_minimum_msat = channel.get_holder_htlc_minimum_msat(); } @@ -6678,10 +6748,10 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known()); - let chan_stat = get_channel_value_stat!(nodes[0], chan.2); + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], chan.2); - let opt_anchors = get_opt_anchors!(nodes[0], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2); // The 2* and +1 are for the fee spike reserve. let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors); @@ -6757,7 +6827,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); @@ -7151,7 +7221,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan =create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -7242,7 +7313,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -7520,7 +7592,7 @@ fn test_data_loss_protect() { if let MessageSendEvent::HandleError { ref action, .. 
} = msg { match action { &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.data, "Failed to find corresponding channel"); + assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())); err_msgs_0.push(msg.clone()); }, _ => panic!("Unexpected event!"), @@ -7533,7 +7605,7 @@ fn test_data_loss_protect() { nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); check_closed_broadcast!(nodes[1], false); } @@ -8087,8 +8159,9 @@ fn test_counterparty_raa_skip_no_crash() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let mut guard = nodes[0].node.channel_state.lock().unwrap(); - let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer(); + let mut per_peer_state = nodes[0].node.per_peer_state.write().unwrap(); + let mut guard = per_peer_state.get_mut(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -8445,8 +8518,9 @@ fn test_reject_funding_before_inbound_channel_accepted() { // `handle_accept_channel`, which is required in order for `create_funding_transaction` to // succeed when `nodes[0]` is passed to it. 
{ - let mut lock; - let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id); + let mut node_1_per_peer_lock; + let mut node_1_peer_state_lock; + let channel = get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id); let accept_chan_msg = channel.get_accept_channel_message(); nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); } @@ -8531,8 +8605,8 @@ fn test_can_not_accept_unknown_inbound_channel() { let unknown_channel_id = [0; 32]; let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0); match api_res { - Err(APIError::ChannelUnavailable { err }) => { - assert_eq!(err, "Can't accept a channel that doesn't exist"); + Err(APIError::APIMisuseError { err }) => { + assert_eq!(err, format!("No such channel for the passed counterparty_node_id {}", nodes[1].node.get_our_node_id())); }, Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"), Err(_) => panic!("Unexpected Error"), @@ -8788,7 +8862,9 @@ fn test_update_err_monitor_lockdown() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&chan_1.2) + { if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); } @@ -8879,7 +8955,9 @@ fn test_concurrent_monitor_claim() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); - if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) { + if let Some(ref mut channel) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id()) + .unwrap().lock().unwrap().channel_by_id.get_mut(&chan_1.2) + { if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); } @@ -9199,6 +9277,56 @@ fn test_onchain_htlc_settlement_after_close() { do_test_onchain_htlc_settlement_after_close(false, false); } +#[test] +fn test_duplicate_temporary_channel_id_from_different_peers() { + // Tests that we can accept two different `OpenChannel` requests with the same + // `temporary_channel_id`, as long as they are from different peers. 
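+ // (Editor's note, not part of the original patch: this works because each peer's channels are + // now keyed in that peer's own `channel_by_id` map, so identical `temporary_channel_id`s from + // different peers land in different maps and cannot collide.)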
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Create a first channel + nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + let open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Create a second channel + nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap(); + let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + + // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same + // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0]. + open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id; + + // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same + // `temporary_channel_id`, as they are from different peers. + nodes[0].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg_chan_1_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[1].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } + + nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg_chan_2_0); + { + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::SendAcceptChannel { node_id, msg } => { + assert_eq!(node_id, &nodes[2].node.get_our_node_id()); + assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id); + }, + _ => panic!("Unexpected event"), + } + } +} + #[test] fn test_duplicate_chan_id() { // Test that if a given peer tries to open a channel with the same channel_id as one that is @@ -9287,12 +9415,13 @@ fn test_duplicate_chan_id() { create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event let funding_created = { - let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap(); + let mut per_peer_state = nodes[0].node.per_peer_state.write().unwrap(); + let mut a_channel_lock = per_peer_state.get_mut(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead).
-			let mut as_chan = a_channel_lock.by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+			let mut as_chan = a_channel_lock.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
 			let logger = test_utils::TestLogger::new();
 			as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
 		};
@@ -10008,8 +10137,8 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	// Send the payment through to nodes[3] *without* clearing the PaymentReceived event
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_events.len(), 2);
-	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
-	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None, None);
+	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None, None);
 
 	// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
 	// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
@@ -10203,7 +10332,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
 
 	if on_holder_tx {
-		if let Some(mut chan) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
+		if let Some(mut chan) = nodes[0].node.per_peer_state.write().unwrap().get_mut(&nodes[1].node.get_our_node_id())
+			.unwrap().lock().unwrap().channel_by_id.get_mut(&temporary_channel_id)
+		{
 			chan.holder_dust_limit_satoshis = 546;
 		}
 	}
@@ -10220,8 +10351,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 
 	let dust_buffer_feerate = {
-		let chan_lock = nodes[0].node.channel_state.lock().unwrap();
-		let chan = chan_lock.by_id.get(&channel_id).unwrap();
+		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+		let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
 		chan.get_dust_buffer_feerate(None) as u64
 	};
 	let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 492a6553770..b01549200d4 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -110,8 +110,8 @@ fn chanmon_claim_value_coop_close() {
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 
-	let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
-	let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+	let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
+	let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id);
 
 	assert_eq!(vec![Balance::ClaimableOnChannelClose {
 			claimable_amount_satoshis: 1_000_000 - 1_000 - chan_feerate * channel::commitment_tx_base_weight(opt_anchors) / 1000
@@ -210,8 +210,8 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
 	let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
 
-	let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
-	let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+	let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
+	let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id);
 
 	let remote_txn = get_local_commitment_txn!(nodes[1], chan_id);
 	// Before B receives the payment preimage, it only suggests the push_msat value of 1_000 sats
@@ -557,8 +557,8 @@ fn test_balances_on_local_commitment_htlcs() {
 	check_added_monitors!(nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_2, 20_000_000);
 
-	let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
-	let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+	let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
+	let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_id);
 
 	// Get nodes[0]'s commitment transaction and HTLC-Timeout transactions
 	let as_txn = get_local_commitment_txn!(nodes[0], chan_id);
diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs
index 1e340d4a15e..d3740691010 100644
--- a/lightning/src/ln/onion_route_tests.rs
+++ b/lightning/src/ln/onion_route_tests.rs
@@ -490,7 +490,9 @@ fn test_onion_failure() {
 		Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id));
 
 	let short_channel_id = channels[1].0.contents.short_channel_id;
-	let amt_to_forward = nodes[1].node.channel_state.lock().unwrap().by_id.get(&channels[1].2).unwrap().get_counterparty_htlc_minimum_msat() - 1;
+	let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
+		.unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
+		.get_counterparty_htlc_minimum_msat() - 1;
 	let mut bogus_route = route.clone();
 	let route_len = bogus_route.paths[0].len();
 	bogus_route.paths[0][route_len-1].fee_msat = amt_to_forward;
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 935cb8f9ba4..9a156632019 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -463,7 +463,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 			assert_eq!(node_id, nodes[1].node.get_our_node_id());
 			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
 			check_added_monitors!(nodes[1], 1);
 			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 		},
@@ -531,7 +531,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
 	// and not the original fee. We also update node[1]'s relevant config as
 	// do_claim_payment_along_route expects us to never overpay.
-	nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_id_2).unwrap()
+	nodes[1].node.per_peer_state.write().unwrap().get_mut(&nodes[2].node.get_our_node_id())
+		.unwrap().lock().unwrap().channel_by_id.get_mut(&chan_id_2).unwrap()
 		.config.options.forwarding_fee_base_msat += 100_000;
 	new_route.paths[0][0].fee_msat += 100_000;
 
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 0025598e4c2..667b0ca30d1 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -27,7 +27,6 @@ use bitcoin::hash_types::BlockHash;
 use bitcoin::secp256k1::Secp256k1;
 
 use prelude::*;
-use core::mem;
 
 use ln::functional_test_utils::*;
 
@@ -200,10 +199,13 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-	let channel_state = nodes[0].node.channel_state.lock().unwrap();
-	assert_eq!(channel_state.by_id.len(), 1);
-	assert_eq!(channel_state.short_to_id.len(), 2);
-	mem::drop(channel_state);
+	{
+		let channel_state = nodes[0].node.channel_state.lock().unwrap();
+		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+		assert_eq!(peer_state.channel_by_id.len(), 1);
+		assert_eq!(channel_state.short_to_chan_info.len(), 2);
+	}
 
 	if !reorg_after_reload {
 		if use_funding_unconfirmed {
@@ -221,8 +223,10 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		check_added_monitors!(nodes[1], 1);
 		{
 			let channel_state = nodes[0].node.channel_state.lock().unwrap();
-			assert_eq!(channel_state.by_id.len(), 0);
-			assert_eq!(channel_state.short_to_id.len(), 0);
+			let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+			let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+			assert_eq!(peer_state.channel_by_id.len(), 0);
+			assert_eq!(channel_state.short_to_chan_info.len(), 0);
 		}
 	}
 
@@ -289,8 +293,10 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		check_added_monitors!(nodes[1], 1);
 		{
 			let channel_state = nodes[0].node.channel_state.lock().unwrap();
-			assert_eq!(channel_state.by_id.len(), 0);
-			assert_eq!(channel_state.short_to_id.len(), 0);
+			let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+			let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+			assert_eq!(peer_state.channel_by_id.len(), 0);
+			assert_eq!(channel_state.short_to_chan_info.len(), 0);
 		}
 	}
 	// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 3e9f7e343db..496f64ee97b 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -381,7 +381,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		// closing_signed so we do it ourselves
 		check_closed_broadcast!(nodes[1], false);
 		check_added_monitors!(nodes[1], 1);
-		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
 	}
 
 	assert!(nodes[0].node.list_channels().is_empty());
@@ -762,8 +762,9 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
 		// nodes[1] should happily accept and respond to.
 		node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10;
 		{
-			let mut lock;
-			get_channel_ref!(nodes[0], lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10;
+			let mut node_0_per_peer_lock;
+			let mut node_0_peer_state_lock;
+			get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).closing_fee_limits.as_mut().unwrap().1 *= 10;
 		}
 		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
 		let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
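
Note on the access pattern above (an illustrative sketch, not part of the patch): every test site that previously took the single `channel_state` lock and indexed `by_id` now walks three levels instead: the outer `per_peer_state` read/write lock, the counterparty's `Mutex`-guarded `PeerState`, and finally its `channel_by_id` map. A minimal read-path sketch, assuming the usual `nodes`/`channel_id` fixtures from `functional_test_utils`:

	// Sketch only: acquire locks outer-to-inner, matching the order used in the hunks above.
	let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
	// One Mutex<PeerState> per counterparty; all channels with that peer live inside it.
	let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
	// channel_by_id is keyed by channel_id (or temporary_channel_id before funding).
	let chan = peer_state.channel_by_id.get(&channel_id).unwrap();
	let _dust_feerate = chan.get_dust_buffer_feerate(None) as u64;

Mutating sites follow the same shape but take `per_peer_state.write()` and `channel_by_id.get_mut(..)`, as in the `forwarding_fee_base_msat` and `holder_dust_limit_satoshis` hunks, and `get_channel_ref!` now takes the counterparty node plus two lock binders so both guards outlive the returned channel reference.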