diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 318dfcbbd7e..157bc4e85ed 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1098,7 +1098,7 @@ mod tests { use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::routing::router::{CandidateRouteHop, DefaultRouter, Path, RouteHop}; use lightning::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp, ScoreUpdate}; - use lightning::sign::{ChangeDestinationSource, InMemorySigner, KeysManager}; + use lightning::sign::{ChangeDestinationSource, InMemorySigner, KeysManager, NodeSigner}; use lightning::util::config::UserConfig; use lightning::util::persist::{ KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, @@ -1219,6 +1219,7 @@ mod tests { Arc, IgnoringMessageHandler, Arc, + IgnoringMessageHandler, >, >, chain_monitor: Arc, @@ -1579,6 +1580,7 @@ mod tests { logger.clone(), fee_estimator.clone(), kv_store.clone(), + keys_manager.get_peer_storage_key(), )); let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; @@ -1632,6 +1634,7 @@ mod tests { route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: messenger.clone(), custom_message_handler: IgnoringMessageHandler {}, + send_only_message_handler: IgnoringMessageHandler {}, }; let peer_manager = Arc::new(PeerManager::new( msg_handler, diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 89ac7a52ec2..8f289d60d2f 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -760,6 +760,8 @@ mod tests { fn handle_tx_init_rbf(&self, _their_node_id: PublicKey, _msg: &TxInitRbf) {} fn handle_tx_ack_rbf(&self, _their_node_id: PublicKey, _msg: &TxAckRbf) {} fn handle_tx_abort(&self, _their_node_id: PublicKey, _msg: &TxAbort) {} + fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: &PeerStorageMessage) {} + fn handle_your_peer_storage(&self, _their_node_id: PublicKey, _msg: &YourPeerStorageMessage) {} fn peer_disconnected(&self, their_node_id: PublicKey) { if their_node_id == self.expected_pubkey { self.disconnected_flag.store(true, Ordering::SeqCst); @@ -835,6 +837,7 @@ mod tests { route_handler: Arc::clone(&a_handler), onion_message_handler: Arc::new(IgnoringMessageHandler {}), custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), }; let a_manager = Arc::new(PeerManager::new( a_msg_handler, @@ -858,6 +861,7 @@ mod tests { route_handler: Arc::clone(&b_handler), onion_message_handler: Arc::new(IgnoringMessageHandler {}), custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), }; let b_manager = Arc::new(PeerManager::new( b_msg_handler, @@ -920,6 +924,7 @@ mod tests { onion_message_handler: Arc::new(IgnoringMessageHandler {}), route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler {}), custom_message_handler: Arc::new(IgnoringMessageHandler {}), + send_only_message_handler: Arc::new(IgnoringMessageHandler {}), }; let a_manager = Arc::new(PeerManager::new( a_msg_handler, diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index 036ac4e84ba..636654a2acf 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -70,6 +70,8 @@ //! 
(see the [`Trampoline` feature proposal](https://github.com/lightning/bolts/pull/836) for more information). //! - `DnsResolver` - supports resolving DNS names to TXT DNSSEC proofs for BIP 353 payments //! (see [bLIP 32](https://github.com/lightning/blips/blob/master/blip-0032.md) for more information). +//! - `ProvidePeerBackupStorage` - Indicates that we offer the capability to store data of our peers +//! (see https://github.com/lightning/bolts/pull/1110 for more info). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -150,7 +152,7 @@ mod sealed { // Byte 4 OnionMessages, // Byte 5 - ChannelType | SCIDPrivacy, + ProvidePeerBackupStorage | ChannelType | SCIDPrivacy, // Byte 6 ZeroConf, // Byte 7 @@ -171,7 +173,7 @@ mod sealed { // Byte 4 OnionMessages, // Byte 5 - ChannelType | SCIDPrivacy, + ProvidePeerBackupStorage | ChannelType | SCIDPrivacy, // Byte 6 ZeroConf | Keysend, // Byte 7 @@ -522,6 +524,16 @@ mod sealed { supports_onion_messages, requires_onion_messages ); + define_feature!( + 43, + ProvidePeerBackupStorage, + [InitContext, NodeContext], + "Feature flags for `provide_peer_backup_storage`.", + set_provide_peer_backup_storage_optional, + set_provide_peer_backup_storage_required, + supports_provide_peer_storage, + requires_provide_peer_storage + ); define_feature!( 45, ChannelType, @@ -1104,6 +1116,14 @@ mod tests { assert!(!features1.requires_unknown_bits_from(&features2)); assert!(!features2.requires_unknown_bits_from(&features1)); + features1.set_provide_peer_backup_storage_required(); + assert!(features1.requires_unknown_bits_from(&features2)); + assert!(!features2.requires_unknown_bits_from(&features1)); + + features2.set_provide_peer_backup_storage_optional(); + assert!(!features1.requires_unknown_bits_from(&features2)); + assert!(!features2.requires_unknown_bits_from(&features1)); + features1.set_data_loss_protect_required(); assert!(features1.requires_unknown_bits_from(&features2)); assert!(!features2.requires_unknown_bits_from(&features1)); diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index afd9df62851..4e7fdc1e6bb 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -29,22 +29,27 @@ use bitcoin::hash_types::{Txid, BlockHash}; use crate::chain; use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput}; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor}; +use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, write_util}; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::ln::types::ChannelId; +use crate::ln::msgs; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::events::{self, Event, EventHandler, ReplayEvent}; use crate::util::logger::{Logger, WithContext}; use crate::util::errors::APIError; +use crate::util::ser::VecWriter; use crate::util::wakers::{Future, Notifier}; use crate::ln::channel_state::ChannelDetails; - +use crate::ln::msgs::SendingOnlyMessageHandler; +use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; use 
bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; +use core::mem; +use crate::ln::our_peer_storage::OurPeerStorage; /// `Persist` defines behavior for persisting channel monitors: this could mean /// writing once to disk, and/or uploading to one or more backup services. @@ -165,8 +170,8 @@ pub trait Persist { fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint); } -struct MonitorHolder { - monitor: ChannelMonitor, +pub(crate) struct MonitorHolder { + pub(crate) monitor: ChannelMonitor, /// The full set of pending monitor updates for this Channel. /// /// Note that this lock must be held from [`ChannelMonitor::update_monitor`] through to @@ -181,7 +186,7 @@ struct MonitorHolder { /// could cause users to have a full [`ChannelMonitor`] on disk as well as a /// [`ChannelMonitorUpdate`] which was already applied. While this isn't an issue for the /// LDK-provided update-based [`Persist`], it is somewhat surprising for users so we avoid it. - pending_monitor_updates: Mutex>, + pub(crate) pending_monitor_updates: Mutex>, } impl MonitorHolder { @@ -195,8 +200,8 @@ impl MonitorHolder { /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is /// released. pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> { - lock: RwLockReadGuard<'a, HashMap>>, - funding_txo: OutPoint, + pub(crate) lock: RwLockReadGuard<'a, HashMap>>, + pub(crate) funding_txo: OutPoint, } impl Deref for LockedChannelMonitor<'_, ChannelSigner> { @@ -244,75 +249,24 @@ pub struct ChainMonitor>, + our_peer_storage: Mutex, + our_peerstorage_encryption_key: [u8;32], } -impl ChainMonitor -where C::Target: chain::Filter, - T::Target: BroadcasterInterface, - F::Target: FeeEstimator, - L::Target: Logger, - P::Target: Persist, -{ - /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view - /// of a channel and reacting accordingly based on transactions in the given chain data. See - /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will - /// be returned by [`chain::Watch::release_pending_monitor_events`]. - /// - /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent - /// calls must not exclude any transactions matching the new outputs nor any in-block - /// descendants of such transactions. It is not necessary to re-fetch the block to obtain - /// updated `txdata`. - /// - /// Calls which represent a new blockchain tip height should set `best_height`. - fn process_chain_data(&self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN) +pub(crate) fn update_monitor_with_chain_data_util ( + persister: &P, chain_source: &Option, logger: &L, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, + monitor_state: &MonitorHolder, channel_count: usize, + ) -> Result<(), ()> where - FN: Fn(&ChannelMonitor, &TransactionData) -> Vec + C::Target: chain::Filter, + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, + P::Target: Persist, + L::Target: Logger, + ChannelSigner: EcdsaChannelSigner, { - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); - let channel_count = funding_outpoints.len(); - for funding_outpoint in funding_outpoints.iter() { - let monitor_lock = self.monitors.read().unwrap(); - if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { - // Take the monitors lock for writing so that we poison it and any future - // operations going forward fail immediately. - core::mem::drop(monitor_lock); - let _poison = self.monitors.write().unwrap(); - log_error!(self.logger, "{}", err_str); - panic!("{}", err_str); - } - } - } - - // do some followup cleanup if any funding outpoints were added in between iterations - let monitor_states = self.monitors.write().unwrap(); - for (funding_outpoint, monitor_state) in monitor_states.iter() { - if !funding_outpoints.contains(funding_outpoint) { - if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { - log_error!(self.logger, "{}", err_str); - panic!("{}", err_str); - } - } - } - - if let Some(height) = best_height { - // If the best block height is being updated, update highest_chain_height under the - // monitors write lock. - let old_height = self.highest_chain_height.load(Ordering::Acquire); - let new_height = height as usize; - if new_height > old_height { - self.highest_chain_height.store(new_height, Ordering::Release); - } - } - } - - fn update_monitor_with_chain_data( - &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, - monitor_state: &MonitorHolder, channel_count: usize, - ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let monitor = &monitor_state.monitor; - let logger = WithChannelMonitor::from(&self.logger, &monitor, None); + let logger = WithChannelMonitor::from(logger, &monitor, None); let mut txn_outputs = process(monitor, txdata); @@ -337,7 +291,7 @@ where C::Target: chain::Filter, // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) { + match persister.update_persisted_channel(*funding_outpoint, None, monitor) { ChannelMonitorUpdateStatus::Completed => log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", log_funding_info!(monitor) @@ -353,7 +307,7 @@ where C::Target: chain::Filter, // Register any new outputs with the chain source for filtering, storing any dependent // transactions from within the block that previously had not been included in txdata. - if let Some(ref chain_source) = self.chain_source { + if let Some(ref chain_source_ref) = chain_source { let block_hash = header.block_hash(); for (txid, mut outputs) in txn_outputs.drain(..) { for (idx, output) in outputs.drain(..) 
{ @@ -364,13 +318,133 @@ where C::Target: chain::Filter, script_pubkey: output.script_pubkey, }; log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint); - chain_source.register_output(output); + chain_source_ref.register_output(output); } } } Ok(()) } +// Utility function for process_chain_data to prevent code duplication in [`FundRecoverer`] +pub(crate) fn process_chain_data_util(persister: &P, chain_source: &Option, + logger: &L, monitors: &RwLock>>, highest_chain_height: &AtomicUsize, + header: &Header, best_height: Option, txdata: &TransactionData, process: FN) +where + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, + L::Target: Logger, + P::Target: Persist, + C::Target: chain::Filter, +{ + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; + let funding_outpoints = hash_set_from_iter(monitors.read().unwrap().keys().cloned()); + let channel_count = funding_outpoints.len(); + for funding_outpoint in funding_outpoints.iter() { + let monitor_lock = monitors.read().unwrap(); + if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { + if update_monitor_with_chain_data_util(persister, chain_source, logger, header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + // Take the monitors lock for writing so that we poison it and any future + // operations going forward fail immediately. + core::mem::drop(monitor_lock); + let _poison = monitors.write().unwrap(); + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + } + } + } + + // do some followup cleanup if any funding outpoints were added in between iterations + let monitor_states = monitors.write().unwrap(); + for (funding_outpoint, monitor_state) in monitor_states.iter() { + if !funding_outpoints.contains(funding_outpoint) { + if update_monitor_with_chain_data_util(persister, chain_source, logger, header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + } + } + } + + if let Some(height) = best_height { + // If the best block height is being updated, update highest_chain_height under the + // monitors write lock. 
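+ // Note: the `monitor_states` write guard taken above is still alive at this point, so the + // load/compare/store below cannot race with another `process_chain_data_util` call; the + // atomic orderings matter only for readers that sample `highest_chain_height` without + // taking the monitors write lock.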
+ let old_height = highest_chain_height.load(Ordering::Acquire); + let new_height = height as usize; + if new_height > old_height { + highest_chain_height.store(new_height, Ordering::Release); + } + } +} + +impl MessageSendEventsProvider for ChainMonitor +where C::Target: chain::Filter, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + P::Target: Persist, +{ + fn get_and_clear_pending_msg_events(&self) -> Vec { + let mut pending_events = self.pending_send_only_events.lock().unwrap(); + let mut ret = Vec::new(); + mem::swap(&mut ret, &mut *pending_events); + ret } +} + +impl SendingOnlyMessageHandler for ChainMonitor +where C::Target: chain::Filter, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + P::Target: Persist, +{ + fn send_peer_storage(&self, their_node_id: PublicKey) { + let monitors: RwLockReadGuard<'_, hash_map::HashMap, RandomState>> = self.monitors.read().unwrap(); + let mut ser_channels: Vec = Vec::new(); + log_debug!(self.logger, "Sending Peer Storage from chainmonitor"); + ser_channels.extend_from_slice(&(monitors.len() as u64).to_be_bytes()); + for (_, mon) in monitors.iter() { + let mut ser_chan = VecWriter(Vec::new()); + + match write_util(&mon.monitor.inner.lock().unwrap(), true, &mut ser_chan) { + Ok(_) => { + ser_channels.extend_from_slice(&(ser_chan.0.len() as u64).to_be_bytes()); + ser_channels.extend(ser_chan.0.iter()); + } + Err(_) => { + panic!("Cannot write monitor for {}", mon.monitor.channel_id()) + } + } + } + self.our_peer_storage.lock().unwrap().stub_channels(ser_channels); + + self.pending_send_only_events.lock().unwrap().push(events::MessageSendEvent::SendPeerStorageMessage { node_id: their_node_id, + msg: msgs::PeerStorageMessage { data: self.our_peer_storage.lock().unwrap().encrypt_our_peer_storage(self.our_peerstorage_encryption_key) } }) + } +} + +impl ChainMonitor +where C::Target: chain::Filter, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, + L::Target: Logger, + P::Target: Persist, +{ + /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view + /// of a channel and reacting accordingly based on transactions in the given chain data. See + /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will + /// be returned by [`chain::Watch::release_pending_monitor_events`]. + /// + /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent + /// calls must not exclude any transactions matching the new outputs nor any in-block + /// descendants of such transactions. It is not necessary to re-fetch the block to obtain + /// updated `txdata`. + /// + /// Calls which represent a new blockchain tip height should set `best_height`. + fn process_chain_data(&self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN) + where + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, + { + process_chain_data_util(&self.persister, &self.chain_source, &self.logger, &self.monitors, &self.highest_chain_height, header, best_height, txdata, process); + } + /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels. /// /// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor /// will call back to it indicating transactions and outputs of interest. This allows clients to /// pre-filter blocks or only fetch blocks matching a compact filter.
Otherwise, clients may /// always need to fetch full blocks absent another means for determining which blocks contain /// transactions relevant to the watched channels. - pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P) -> Self { + pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P, our_peerstorage_encryption_key: [u8; 32]) -> Self { Self { monitors: RwLock::new(new_hash_map()), chain_source, @@ -389,6 +463,9 @@ where C::Target: chain::Filter, pending_monitor_events: Mutex::new(Vec::new()), highest_chain_height: AtomicUsize::new(0), event_notifier: Notifier::new(), + pending_send_only_events: Mutex::new(Vec::new()), + our_peer_storage: Mutex::new(OurPeerStorage::new()), + our_peerstorage_encryption_key } } @@ -656,6 +733,18 @@ where C::Target: chain::Filter, }); } } + + /// Retrieves all node IDs associated with the monitors. + /// + /// This function collects the counterparty node IDs from all monitors into a `HashSet`, + /// ensuring unique IDs are returned. + fn get_peer_node_ids(&self) -> HashSet { + let mon = self.monitors.read().unwrap(); + mon + .values() + .map(|monitor| monitor.monitor.get_counterparty_node_id().unwrap().clone()) + .collect() + } } impl @@ -724,6 +813,12 @@ where header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger ) }); + + // Send peer storage every time a new block arrives. + for node_id in self.get_peer_node_ids() { + self.send_peer_storage(node_id); + } + // Assume we may have some new events and wake the event processor self.event_notifier.notify(); } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 86f0d3de5ed..c51a0640467 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -36,8 +36,9 @@ use crate::ln::channel::INITIAL_COMMITMENT_NUMBER; use crate::ln::types::{PaymentHash, PaymentPreimage, ChannelId}; use crate::ln::msgs::DecodeError; use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint}; -use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys}; +use crate::ln::chan_utils::{self, CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys, ChannelPublicKeys}; use crate::ln::channelmanager::{HTLCSource, SentHTLCId}; +use crate::ln::features::ChannelTypeFeatures; use crate::chain; use crate::chain::{BestBlock, WatchedOutput}; use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; @@ -113,6 +114,11 @@ pub struct ChannelMonitorUpdate { /// No other [`ChannelMonitorUpdate`]s are allowed after force-close. pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX; +/// This update ID is used inside [`ChannelMonitorImpl`] to recognise +/// that we're dealing with a [`StubChannelMonitor`], since we need to make +/// some exceptions while handling it.
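+///
+/// As an illustrative sketch (not an API added by this diff; `mon` is assumed to be a
+/// [`ChannelMonitor`] rebuilt from retrieved peer storage):
+///
+/// ```ignore
+/// if mon.get_latest_update_id() == STUB_CHANNEL_UPDATE_IDENTIFIER {
+///     // `mon` is a stub: it can watch the chain for this channel, but it lacks
+///     // the onchain handler state needed to produce new commitment data.
+/// }
+/// ```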
+pub const STUB_CHANNEL_UPDATE_IDENTIFIER: u64 = core::u64::MAX - 1; + impl Writeable for ChannelMonitorUpdate { fn write(&self, w: &mut W) -> Result<(), io::Error> { write_ver_prefix!(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); @@ -850,6 +856,40 @@ pub struct ChannelMonitor { pub(super) inner: Mutex>, } +/// Data prepended to the serialised [`ChannelMonitor`] inside +/// PeerStorage so that we can identify stale or missing channel +/// monitors in [`FundRecoverer::handle_your_peer_storage`] and +/// [`ChannelManager::handle_your_peer_storage`]. +pub struct StubChannelInfo { + /// Minimum seen secret of the [`ChannelMonitor`] to identify stale channels. + pub min_seen_secret: u64, + /// Channel Id of the channel retrieved from Peer Storage. + pub cid: ChannelId, + /// Node Id of the counterparty. + pub counterparty_node_id: PublicKey, + /// Funding outpoint of the [`ChannelMonitor`]. + pub funding_outpoint: OutPoint, + /// Channel Keys Id of the channel retrieved from the Peer Storage. + pub channel_keys_id: [u8; 32], + /// Channel value, in satoshis, of the channel. + pub channel_value_satoshi: u64 +} + +impl_writeable_tlv_based!(StubChannelInfo, { + (0, min_seen_secret, required), + (2, cid, required), + (4, counterparty_node_id, required), + (6, funding_outpoint, required), + (8, channel_keys_id, required), + (10, channel_value_satoshi, required), +}); + +/// Read the [`StubChannelInfo`] prefix from `chan_reader`, which holds a [`ChannelMonitor`] serialised for Peer Storage. +pub fn get_stub_channel_info_from_ser_channel(chan_reader: &mut R) -> Result { + let stub_info: StubChannelInfo = Readable::read(chan_reader)?; + Ok(stub_info) +} + impl Clone for ChannelMonitor where Signer: Clone { fn clone(&self) -> Self { let inner = self.inner.lock().unwrap().clone(); @@ -1028,27 +1068,28 @@ impl PartialEq for ChannelMonitor where Sign } } -impl Writeable for ChannelMonitor { - fn write(&self, writer: &mut W) -> Result<(), Error> { - self.inner.lock().unwrap().write(writer) - } -} - -// These are also used for ChannelMonitorUpdate, above. -const SERIALIZATION_VERSION: u8 = 1; -const MIN_SERIALIZATION_VERSION: u8 = 1; +/// Utility function for writing [`ChannelMonitor`] to prevent code duplication in [`ChainMonitor`] while sending Peer Storage. +pub(crate) fn write_util(channel_monitor: &ChannelMonitorImpl, is_stub: bool, writer: &mut W) -> Result<(), Error> { + // Prepend min_seen_secret and ChannelID so that we can compare data in `ChannelManager::handle_your_peer_storage`.
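+ // The resulting stub layout is: the `StubChannelInfo` TLV stream, then the usual versioned + // monitor serialisation below; `get_stub_channel_info_from_ser_channel` reads that prefix + // back before the monitor itself is deserialised.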
+ if is_stub { + let stub_info = StubChannelInfo { + min_seen_secret: channel_monitor.get_min_seen_secret(), + cid: channel_monitor.channel_id(), + counterparty_node_id: channel_monitor.counterparty_node_id.unwrap(), + channel_keys_id: channel_monitor.channel_keys_id, + funding_outpoint: channel_monitor.funding_info.0, + channel_value_satoshi: channel_monitor.channel_value_satoshis, + }; + stub_info.write(writer)?; + } -impl Writeable for ChannelMonitorImpl { - fn write(&self, writer: &mut W) -> Result<(), Error> { write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); - self.latest_update_id.write(writer)?; - + channel_monitor.latest_update_id.write(writer)?; // Set in initial Channel-object creation, so should always be set by now: - U48(self.commitment_transaction_number_obscure_factor).write(writer)?; - - self.destination_script.write(writer)?; - if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script { + U48(channel_monitor.commitment_transaction_number_obscure_factor).write(writer)?; + channel_monitor.destination_script.write(writer)?; + if let Some(ref broadcasted_holder_revokable_script) = channel_monitor.broadcasted_holder_revokable_script { writer.write_all(&[0; 1])?; broadcasted_holder_revokable_script.0.write(writer)?; broadcasted_holder_revokable_script.1.write(writer)?; @@ -1057,25 +1098,25 @@ impl Writeable for ChannelMonitorImpl { writer.write_all(&[1; 1])?; } - self.counterparty_payment_script.write(writer)?; - match &self.shutdown_script { + channel_monitor.counterparty_payment_script.write(writer)?; + match &channel_monitor.shutdown_script { Some(script) => script.write(writer)?, None => ScriptBuf::new().write(writer)?, } - self.channel_keys_id.write(writer)?; - self.holder_revocation_basepoint.write(writer)?; - writer.write_all(&self.funding_info.0.txid[..])?; - writer.write_all(&self.funding_info.0.index.to_be_bytes())?; - self.funding_info.1.write(writer)?; - self.current_counterparty_commitment_txid.write(writer)?; - self.prev_counterparty_commitment_txid.write(writer)?; + channel_monitor.channel_keys_id.write(writer)?; + channel_monitor.holder_revocation_basepoint.write(writer)?; + writer.write_all(&channel_monitor.funding_info.0.txid[..])?; + writer.write_all(&channel_monitor.funding_info.0.index.to_be_bytes())?; + channel_monitor.funding_info.1.write(writer)?; + channel_monitor.current_counterparty_commitment_txid.write(writer)?; + channel_monitor.prev_counterparty_commitment_txid.write(writer)?; - self.counterparty_commitment_params.write(writer)?; - self.funding_redeemscript.write(writer)?; - self.channel_value_satoshis.write(writer)?; + channel_monitor.counterparty_commitment_params.write(writer)?; + channel_monitor.funding_redeemscript.write(writer)?; + channel_monitor.channel_value_satoshis.write(writer)?; - match self.their_cur_per_commitment_points { + match channel_monitor.their_cur_per_commitment_points { Some((idx, pubkey, second_option)) => { writer.write_all(&byte_utils::be48_to_array(idx))?; writer.write_all(&pubkey.serialize())?; @@ -1093,9 +1134,9 @@ impl Writeable for ChannelMonitorImpl { }, } - writer.write_all(&self.on_holder_tx_csv.to_be_bytes())?; + writer.write_all(&channel_monitor.on_holder_tx_csv.to_be_bytes())?; - self.commitment_secrets.write(writer)?; + channel_monitor.commitment_secrets.write(writer)?; macro_rules! 
serialize_htlc_in_commitment { ($htlc_output: expr) => { @@ -1107,55 +1148,55 @@ impl Writeable for ChannelMonitorImpl { } } - writer.write_all(&(self.counterparty_claimable_outpoints.len() as u64).to_be_bytes())?; - for (ref txid, ref htlc_infos) in self.counterparty_claimable_outpoints.iter() { + writer.write_all(&(channel_monitor.counterparty_claimable_outpoints.len() as u64).to_be_bytes())?; + for (ref txid, ref htlc_infos) in channel_monitor.counterparty_claimable_outpoints.iter() { writer.write_all(&txid[..])?; writer.write_all(&(htlc_infos.len() as u64).to_be_bytes())?; for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() { - debug_assert!(htlc_source.is_none() || Some(**txid) == self.current_counterparty_commitment_txid - || Some(**txid) == self.prev_counterparty_commitment_txid, + debug_assert!(htlc_source.is_none() || Some(**txid) == channel_monitor.current_counterparty_commitment_txid + || Some(**txid) == channel_monitor.prev_counterparty_commitment_txid, "HTLC Sources for all revoked commitment transactions should be none!"); serialize_htlc_in_commitment!(htlc_output); htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?; } } - writer.write_all(&(self.counterparty_commitment_txn_on_chain.len() as u64).to_be_bytes())?; - for (ref txid, commitment_number) in self.counterparty_commitment_txn_on_chain.iter() { + writer.write_all(&(channel_monitor.counterparty_commitment_txn_on_chain.len() as u64).to_be_bytes())?; + for (ref txid, commitment_number) in channel_monitor.counterparty_commitment_txn_on_chain.iter() { writer.write_all(&txid[..])?; writer.write_all(&byte_utils::be48_to_array(*commitment_number))?; } - writer.write_all(&(self.counterparty_hash_commitment_number.len() as u64).to_be_bytes())?; - for (ref payment_hash, commitment_number) in self.counterparty_hash_commitment_number.iter() { + writer.write_all(&(channel_monitor.counterparty_hash_commitment_number.len() as u64).to_be_bytes())?; + for (ref payment_hash, commitment_number) in channel_monitor.counterparty_hash_commitment_number.iter() { writer.write_all(&payment_hash.0[..])?; writer.write_all(&byte_utils::be48_to_array(*commitment_number))?; } - if let Some(ref prev_holder_tx) = self.prev_holder_signed_commitment_tx { + if let Some(ref prev_holder_tx) = channel_monitor.prev_holder_signed_commitment_tx { writer.write_all(&[1; 1])?; prev_holder_tx.write(writer)?; } else { writer.write_all(&[0; 1])?; } - self.current_holder_commitment_tx.write(writer)?; + channel_monitor.current_holder_commitment_tx.write(writer)?; - writer.write_all(&byte_utils::be48_to_array(self.current_counterparty_commitment_number))?; - writer.write_all(&byte_utils::be48_to_array(self.current_holder_commitment_number))?; + writer.write_all(&byte_utils::be48_to_array(channel_monitor.current_counterparty_commitment_number))?; + writer.write_all(&byte_utils::be48_to_array(channel_monitor.current_holder_commitment_number))?; - writer.write_all(&(self.payment_preimages.len() as u64).to_be_bytes())?; - for payment_preimage in self.payment_preimages.values() { + writer.write_all(&(channel_monitor.payment_preimages.len() as u64).to_be_bytes())?; + for payment_preimage in channel_monitor.payment_preimages.values() { writer.write_all(&payment_preimage.0[..])?; } - writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev { + writer.write_all(&(channel_monitor.pending_monitor_events.iter().filter(|ev| match ev { MonitorEvent::HTLCEvent(_) => true, MonitorEvent::HolderForceClosed(_) => true, 
MonitorEvent::HolderForceClosedWithInfo { .. } => true, _ => false, }).count() as u64).to_be_bytes())?; - for event in self.pending_monitor_events.iter() { + for event in channel_monitor.pending_monitor_events.iter() { match event { MonitorEvent::HTLCEvent(upd) => { 0u8.write(writer)?; @@ -1170,21 +1211,21 @@ impl Writeable for ChannelMonitorImpl { } } - writer.write_all(&(self.pending_events.len() as u64).to_be_bytes())?; - for event in self.pending_events.iter() { + writer.write_all(&(channel_monitor.pending_events.len() as u64).to_be_bytes())?; + for event in channel_monitor.pending_events.iter() { event.write(writer)?; } - self.best_block.block_hash.write(writer)?; - writer.write_all(&self.best_block.height.to_be_bytes())?; + channel_monitor.best_block.block_hash.write(writer)?; + writer.write_all(&channel_monitor.best_block.height.to_be_bytes())?; - writer.write_all(&(self.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?; - for ref entry in self.onchain_events_awaiting_threshold_conf.iter() { + writer.write_all(&(channel_monitor.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?; + for ref entry in channel_monitor.onchain_events_awaiting_threshold_conf.iter() { entry.write(writer)?; } - (self.outputs_to_watch.len() as u64).write(writer)?; - for (txid, idx_scripts) in self.outputs_to_watch.iter() { + (channel_monitor.outputs_to_watch.len() as u64).write(writer)?; + for (txid, idx_scripts) in channel_monitor.outputs_to_watch.iter() { txid.write(writer)?; (idx_scripts.len() as u64).write(writer)?; for (idx, script) in idx_scripts.iter() { @@ -1192,40 +1233,59 @@ impl Writeable for ChannelMonitorImpl { script.write(writer)?; } } - self.onchain_tx_handler.write(writer)?; - self.lockdown_from_offchain.write(writer)?; - self.holder_tx_signed.write(writer)?; + if !is_stub { + channel_monitor.onchain_tx_handler.write(writer)?; + } + + channel_monitor.lockdown_from_offchain.write(writer)?; + channel_monitor.holder_tx_signed.write(writer)?; // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility. - let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev { + let pending_monitor_events = match channel_monitor.pending_monitor_events.iter().find(|ev| match ev { MonitorEvent::HolderForceClosedWithInfo { .. } => true, _ => false, }) { Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. 
}) => { - let mut pending_monitor_events = self.pending_monitor_events.clone(); + let mut pending_monitor_events = channel_monitor.pending_monitor_events.clone(); pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint)); pending_monitor_events } - _ => self.pending_monitor_events.clone(), + _ => channel_monitor.pending_monitor_events.clone(), }; write_tlv_fields!(writer, { - (1, self.funding_spend_confirmed, option), - (3, self.htlcs_resolved_on_chain, required_vec), + (1, channel_monitor.funding_spend_confirmed, option), + (3, channel_monitor.htlcs_resolved_on_chain, required_vec), (5, pending_monitor_events, required_vec), - (7, self.funding_spend_seen, required), - (9, self.counterparty_node_id, option), - (11, self.confirmed_commitment_tx_counterparty_output, option), - (13, self.spendable_txids_confirmed, required_vec), - (15, self.counterparty_fulfilled_htlcs, required), - (17, self.initial_counterparty_commitment_info, option), - (19, self.channel_id, required), - (21, self.balances_empty_height, option), - (23, self.holder_pays_commitment_tx_fee, option), + (7, channel_monitor.funding_spend_seen, required), + (9, channel_monitor.counterparty_node_id, option), + (11, channel_monitor.confirmed_commitment_tx_counterparty_output, option), + (13, channel_monitor.spendable_txids_confirmed, required_vec), + (15, channel_monitor.counterparty_fulfilled_htlcs, required), + (17, channel_monitor.initial_counterparty_commitment_info, option), + (19, channel_monitor.channel_id, required), + (21, channel_monitor.balances_empty_height, option), + (23, channel_monitor.holder_pays_commitment_tx_fee, option), }); Ok(()) + +} + +impl Writeable for ChannelMonitor { + fn write(&self, writer: &mut W) -> Result<(), Error> { + self.inner.lock().unwrap().write(writer) + } +} + +// These are also used for ChannelMonitorUpdate, above. +const SERIALIZATION_VERSION: u8 = 1; +const MIN_SERIALIZATION_VERSION: u8 = 1; + +impl Writeable for ChannelMonitorImpl { + fn write(&self, writer: &mut W) -> Result<(), Error> { + write_util(self, false, writer) } } @@ -1432,6 +1492,13 @@ impl ChannelMonitor { }) } + pub(crate) fn merge_commitment_secret(&mut self, monitor: ChannelMonitor) { + if self.get_min_seen_secret() > monitor.get_min_seen_secret() { + let inner = monitor.inner.lock().unwrap(); + self.inner.lock().unwrap().commitment_secrets = inner.commitment_secrets.clone(); + } + } + #[cfg(test)] fn provide_secret(&self, idx: u64, secret: [u8; 32]) -> Result<(), &'static str> { self.inner.lock().unwrap().provide_secret(idx, secret) } @@ -3493,6 +3560,10 @@ impl ChannelMonitorImpl { block_hash, per_commitment_claimable_data.iter().map(|(htlc, htlc_source)| (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) ), logger); + } else if self.latest_update_id == STUB_CHANNEL_UPDATE_IDENTIFIER { + // Stub channel monitors don't store the per-commitment HTLC data, so fail back + // whatever we can with an empty HTLC set.
+ fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height, + block_hash, [].iter().map(|reference| *reference), logger); } else { // Our fuzzers aren't constrained by pesky things like valid signatures, so can // spend our funding output with a transaction which doesn't match our past @@ -4253,6 +4324,9 @@ impl ChannelMonitorImpl { if *idx == input.previous_output.vout { #[cfg(test)] { + if self.latest_update_id == STUB_CHANNEL_UPDATE_IDENTIFIER { + return true; + } // If the expected script is a known type, check that the witness // appears to be spending the correct type (ie that the match would // actually succeed in BIP 158/159-style filters). @@ -4659,322 +4733,420 @@ where const MAX_ALLOC_SIZE: usize = 64*1024; -impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> - for (BlockHash, ChannelMonitor) { - fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { - macro_rules! unwrap_obj { - ($key: expr) => { - match $key { - Ok(res) => res, - Err(_) => return Err(DecodeError::InvalidValue), - } +/// Configuration options for utilities that read and deserialize data. +/// +/// This enum provides two modes of operation: +/// - `IsStub`: Uses predefined keys and a cryptographic context. +/// - `NotStub`: Uses external sources for entropy and signer management. +/// +/// # Variants +/// +/// - `IsStub`: +/// - `keys`: Cryptographic keys (`ChannelSigner`). +/// - `secp_ctx`: Cryptographic context (`Secp256k1`). +/// +/// - `NotStub`: +/// - `entropy_source`: Reference to an entropy source (`EntropySource`). +/// - `signer_provider`: Reference to a signer provider (`SignerProvider`). +/// +/// Use this enum to configure operations like reading serialized data into +/// a `ChannelMonitor`. +pub enum ReadUtilOpt<'a, 'b, ChannelSigner, SP, ES> +where + ChannelSigner: EcdsaChannelSigner, + SP: SignerProvider, + ES: EntropySource, +{ + /// Parameters required for `read_util` to read [`ChannelMonitor`] from PeerStorage. + IsStub { + keys: ChannelSigner, + secp_ctx: Secp256k1 + }, + /// Parameters required for `read_util` to read [`ChannelMonitor`] + /// which are not from PeerStorage. + NotStub { + entropy_source: &'a ES, + signer_provider: &'b SP + } +} + +/// Utility function for reading [`ChannelMonitor`]. +pub fn read_util<'a, 'b, R, ChannelSigner: EcdsaChannelSigner, SP, ES>(reader: &mut R, params: ReadUtilOpt<'a, 'b, ChannelSigner, SP, ES>) -> Result<(BlockHash, ChannelMonitor), DecodeError> +where + R: io::Read, + SP: SignerProvider, + ES: EntropySource, +{ + macro_rules! unwrap_obj { + ($key: expr) => { + match $key { + Ok(res) => res, + Err(_) => return Err(DecodeError::InvalidValue), } } - - let (entropy_source, signer_provider) = args; - - let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); - - let latest_update_id: u64 = Readable::read(reader)?; - let commitment_transaction_number_obscure_factor = ::read(reader)?.0; - - let destination_script = Readable::read(reader)?; - let broadcasted_holder_revokable_script = match ::read(reader)? 
{ - 0 => { - let revokable_address = Readable::read(reader)?; - let per_commitment_point = Readable::read(reader)?; - let revokable_script = Readable::read(reader)?; - Some((revokable_address, per_commitment_point, revokable_script)) - }, - 1 => { None }, - _ => return Err(DecodeError::InvalidValue), - }; - let mut counterparty_payment_script: ScriptBuf = Readable::read(reader)?; - let shutdown_script = { - let script = ::read(reader)?; - if script.is_empty() { None } else { Some(script) } - }; - - let channel_keys_id = Readable::read(reader)?; - let holder_revocation_basepoint = Readable::read(reader)?; - // Technically this can fail and serialize fail a round-trip, but only for serialization of - // barely-init'd ChannelMonitors that we can't do anything with. - let outpoint = OutPoint { - txid: Readable::read(reader)?, - index: Readable::read(reader)?, - }; - let funding_info = (outpoint, Readable::read(reader)?); - let current_counterparty_commitment_txid = Readable::read(reader)?; - let prev_counterparty_commitment_txid = Readable::read(reader)?; - - let counterparty_commitment_params = Readable::read(reader)?; - let funding_redeemscript = Readable::read(reader)?; - let channel_value_satoshis = Readable::read(reader)?; - - let their_cur_per_commitment_points = { - let first_idx = ::read(reader)?.0; - if first_idx == 0 { - None + } + let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + + let mut latest_update_id: u64 = Readable::read(reader)?; + let commitment_transaction_number_obscure_factor = ::read(reader)?.0; + let destination_script: ScriptBuf = Readable::read(reader)?; + + let broadcasted_holder_revokable_script = match ::read(reader)? { + 0 => { + let revokable_address = Readable::read(reader)?; + let per_commitment_point = Readable::read(reader)?; + let revokable_script = Readable::read(reader)?; + Some((revokable_address, per_commitment_point, revokable_script)) + }, + 1 => { None }, + _ => return Err(DecodeError::InvalidValue), + }; + let mut counterparty_payment_script: ScriptBuf = Readable::read(reader)?; + let shutdown_script = { + let script = ::read(reader)?; + if script.is_empty() { None } else { Some(script) } + }; + + let channel_keys_id = Readable::read(reader)?; + let holder_revocation_basepoint = Readable::read(reader)?; + // Technically this can fail and serialize fail a round-trip, but only for serialization of + // barely-init'd ChannelMonitors that we can't do anything with. 
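+ // In the stub path these base fields come from bytes produced by `write_util(.., true, ..)` + // (after its `StubChannelInfo` prefix has been consumed), so the read order here must stay + // in sync with that writer.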
+ let outpoint = OutPoint { + txid: Readable::read(reader)?, + index: Readable::read(reader)?, + }; + let funding_info = (outpoint, Readable::read(reader)?); + let current_counterparty_commitment_txid = Readable::read(reader)?; + let prev_counterparty_commitment_txid = Readable::read(reader)?; + + let counterparty_commitment_params: CounterpartyCommitmentParameters = Readable::read(reader)?; + let funding_redeemscript = Readable::read(reader)?; + let channel_value_satoshis = Readable::read(reader)?; + + let their_cur_per_commitment_points = { + let first_idx = ::read(reader)?.0; + if first_idx == 0 { + None + } else { + let first_point = Readable::read(reader)?; + let second_point_slice: [u8; 33] = Readable::read(reader)?; + if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 { + Some((first_idx, first_point, None)) } else { - let first_point = Readable::read(reader)?; - let second_point_slice: [u8; 33] = Readable::read(reader)?; - if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 { - Some((first_idx, first_point, None)) - } else { - Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice))))) - } + Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice))))) } - }; + } + }; - let on_holder_tx_csv: u16 = Readable::read(reader)?; + let on_holder_tx_csv: u16 = Readable::read(reader)?; - let commitment_secrets = Readable::read(reader)?; + let commitment_secrets = Readable::read(reader)?; - macro_rules! read_htlc_in_commitment { - () => { - { - let offered: bool = Readable::read(reader)?; - let amount_msat: u64 = Readable::read(reader)?; - let cltv_expiry: u32 = Readable::read(reader)?; - let payment_hash: PaymentHash = Readable::read(reader)?; - let transaction_output_index: Option = Readable::read(reader)?; - - HTLCOutputInCommitment { - offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index - } + macro_rules! 
read_htlc_in_commitment { + () => { + { + let offered: bool = Readable::read(reader)?; + let amount_msat: u64 = Readable::read(reader)?; + let cltv_expiry: u32 = Readable::read(reader)?; + let payment_hash: PaymentHash = Readable::read(reader)?; + let transaction_output_index: Option = Readable::read(reader)?; + + HTLCOutputInCommitment { + offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index } } } + } - let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?; - let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64)); - for _ in 0..counterparty_claimable_outpoints_len { - let txid: Txid = Readable::read(reader)?; - let htlcs_count: u64 = Readable::read(reader)?; - let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32)); - for _ in 0..htlcs_count { - htlcs.push((read_htlc_in_commitment!(), as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o)))); - } - if let Some(_) = counterparty_claimable_outpoints.insert(txid, htlcs) { - return Err(DecodeError::InvalidValue); - } + let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?; + let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64)); + for _ in 0..counterparty_claimable_outpoints_len { + let txid: Txid = Readable::read(reader)?; + let htlcs_count: u64 = Readable::read(reader)?; + let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32)); + for _ in 0..htlcs_count { + htlcs.push((read_htlc_in_commitment!(), as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o)))); + } + if let Some(_) = counterparty_claimable_outpoints.insert(txid, htlcs) { + return Err(DecodeError::InvalidValue); } + } - let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?; - let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32)); - for _ in 0..counterparty_commitment_txn_on_chain_len { - let txid: Txid = Readable::read(reader)?; - let commitment_number = ::read(reader)?.0; - if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, commitment_number) { - return Err(DecodeError::InvalidValue); - } + let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?; + let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32)); + for _ in 0..counterparty_commitment_txn_on_chain_len { + let txid: Txid = Readable::read(reader)?; + let commitment_number = ::read(reader)?.0; + if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, commitment_number) { + return Err(DecodeError::InvalidValue); } + } - let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?; - let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32)); - for _ in 0..counterparty_hash_commitment_number_len { - let payment_hash: PaymentHash = Readable::read(reader)?; - let commitment_number = ::read(reader)?.0; - if let Some(_) = counterparty_hash_commitment_number.insert(payment_hash, commitment_number) { - return Err(DecodeError::InvalidValue); - } + let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?; + let mut counterparty_hash_commitment_number = 
hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32)); + for _ in 0..counterparty_hash_commitment_number_len { + let payment_hash: PaymentHash = Readable::read(reader)?; + let commitment_number = ::read(reader)?.0; + if let Some(_) = counterparty_hash_commitment_number.insert(payment_hash, commitment_number) { + return Err(DecodeError::InvalidValue); } + } - let mut prev_holder_signed_commitment_tx: Option = - match ::read(reader)? { - 0 => None, - 1 => { - Some(Readable::read(reader)?) - }, - _ => return Err(DecodeError::InvalidValue), - }; - let mut current_holder_commitment_tx: HolderSignedTx = Readable::read(reader)?; + let mut prev_holder_signed_commitment_tx: Option = + match ::read(reader)? { + 0 => None, + 1 => { + Some(Readable::read(reader)?) + }, + _ => return Err(DecodeError::InvalidValue), + }; + let mut current_holder_commitment_tx: HolderSignedTx = Readable::read(reader)?; - let current_counterparty_commitment_number = ::read(reader)?.0; - let current_holder_commitment_number = ::read(reader)?.0; + let current_counterparty_commitment_number = ::read(reader)?.0; + let current_holder_commitment_number = ::read(reader)?.0; - let payment_preimages_len: u64 = Readable::read(reader)?; - let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32)); - for _ in 0..payment_preimages_len { - let preimage: PaymentPreimage = Readable::read(reader)?; - let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); - if let Some(_) = payment_preimages.insert(hash, preimage) { - return Err(DecodeError::InvalidValue); - } + let payment_preimages_len: u64 = Readable::read(reader)?; + let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32)); + for _ in 0..payment_preimages_len { + let preimage: PaymentPreimage = Readable::read(reader)?; + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + if let Some(_) = payment_preimages.insert(hash, preimage) { + return Err(DecodeError::InvalidValue); } + } - let pending_monitor_events_len: u64 = Readable::read(reader)?; - let mut pending_monitor_events = Some( - Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)))); - for _ in 0..pending_monitor_events_len { - let ev = match ::read(reader)? { - 0 => MonitorEvent::HTLCEvent(Readable::read(reader)?), - 1 => MonitorEvent::HolderForceClosed(funding_info.0), - _ => return Err(DecodeError::InvalidValue) - }; - pending_monitor_events.as_mut().unwrap().push(ev); - } + let pending_monitor_events_len: u64 = Readable::read(reader)?; + let mut pending_monitor_events = Some( + Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)))); + for _ in 0..pending_monitor_events_len { + let ev = match ::read(reader)? { + 0 => MonitorEvent::HTLCEvent(Readable::read(reader)?), + 1 => MonitorEvent::HolderForceClosed(funding_info.0), + _ => return Err(DecodeError::InvalidValue) + }; + pending_monitor_events.as_mut().unwrap().push(ev); + } - let pending_events_len: u64 = Readable::read(reader)?; - let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); - for _ in 0..pending_events_len { - if let Some(event) = MaybeReadable::read(reader)? 
{ - pending_events.push(event); - } + let pending_events_len: u64 = Readable::read(reader)?; + let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); + for _ in 0..pending_events_len { + if let Some(event) = MaybeReadable::read(reader)? { + pending_events.push(event); } + } - let best_block = BestBlock::new(Readable::read(reader)?, Readable::read(reader)?); + let best_block = BestBlock::new(Readable::read(reader)?, Readable::read(reader)?); - let waiting_threshold_conf_len: u64 = Readable::read(reader)?; - let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128)); - for _ in 0..waiting_threshold_conf_len { - if let Some(val) = MaybeReadable::read(reader)? { - onchain_events_awaiting_threshold_conf.push(val); - } + let waiting_threshold_conf_len: u64 = Readable::read(reader)?; + let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128)); + for _ in 0..waiting_threshold_conf_len { + if let Some(val) = MaybeReadable::read(reader)? { + onchain_events_awaiting_threshold_conf.push(val); } + } - let outputs_to_watch_len: u64 = Readable::read(reader)?; - let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::() + mem::size_of::>()))); - for _ in 0..outputs_to_watch_len { - let txid = Readable::read(reader)?; - let outputs_len: u64 = Readable::read(reader)?; - let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::()))); - for _ in 0..outputs_len { - outputs.push((Readable::read(reader)?, Readable::read(reader)?)); - } - if let Some(_) = outputs_to_watch.insert(txid, outputs) { - return Err(DecodeError::InvalidValue); - } - } - let onchain_tx_handler: OnchainTxHandler = ReadableArgs::read( - reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id) - )?; - - let lockdown_from_offchain = Readable::read(reader)?; - let holder_tx_signed = Readable::read(reader)?; - - if let Some(prev_commitment_tx) = prev_holder_signed_commitment_tx.as_mut() { - let prev_holder_value = onchain_tx_handler.get_prev_holder_commitment_to_self_value(); - if prev_holder_value.is_none() { return Err(DecodeError::InvalidValue); } - if prev_commitment_tx.to_self_value_sat == u64::max_value() { - prev_commitment_tx.to_self_value_sat = prev_holder_value.unwrap(); - } else if prev_commitment_tx.to_self_value_sat != prev_holder_value.unwrap() { - return Err(DecodeError::InvalidValue); - } + let outputs_to_watch_len: u64 = Readable::read(reader)?; + let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::() + mem::size_of::>()))); + for _ in 0..outputs_to_watch_len { + let txid = Readable::read(reader)?; + let outputs_len: u64 = Readable::read(reader)?; + let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / (mem::size_of::() + mem::size_of::()))); + for _ in 0..outputs_len { + outputs.push((Readable::read(reader)?, Readable::read(reader)?)); } - - let cur_holder_value = onchain_tx_handler.get_cur_holder_commitment_to_self_value(); - if current_holder_commitment_tx.to_self_value_sat == u64::max_value() { - current_holder_commitment_tx.to_self_value_sat = cur_holder_value; - } else if current_holder_commitment_tx.to_self_value_sat 
!= cur_holder_value { + if let Some(_) = outputs_to_watch.insert(txid, outputs) { return Err(DecodeError::InvalidValue); } + } - let mut funding_spend_confirmed = None; - let mut htlcs_resolved_on_chain = Some(Vec::new()); - let mut funding_spend_seen = Some(false); - let mut counterparty_node_id = None; - let mut confirmed_commitment_tx_counterparty_output = None; - let mut spendable_txids_confirmed = Some(Vec::new()); - let mut counterparty_fulfilled_htlcs = Some(new_hash_map()); - let mut initial_counterparty_commitment_info = None; - let mut balances_empty_height = None; - let mut channel_id = None; - let mut holder_pays_commitment_tx_fee = None; - read_tlv_fields!(reader, { - (1, funding_spend_confirmed, option), - (3, htlcs_resolved_on_chain, optional_vec), - (5, pending_monitor_events, optional_vec), - (7, funding_spend_seen, option), - (9, counterparty_node_id, option), - (11, confirmed_commitment_tx_counterparty_output, option), - (13, spendable_txids_confirmed, optional_vec), - (15, counterparty_fulfilled_htlcs, option), - (17, initial_counterparty_commitment_info, option), - (19, channel_id, option), - (21, balances_empty_height, option), - (23, holder_pays_commitment_tx_fee, option), - }); - // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both - // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`. - if let Some(ref mut pending_monitor_events) = pending_monitor_events { - if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) && - pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. })) - { - pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_))); + let onchain_tx_handler: OnchainTxHandler; + + match params { + ReadUtilOpt::IsStub { mut keys, secp_ctx } => { + latest_update_id = STUB_CHANNEL_UPDATE_IDENTIFIER; + let channel_parameters = ChannelTransactionParameters { + holder_pubkeys: keys.pubkeys().clone(), + is_outbound_from_holder: true, + holder_selected_contest_delay: 66, + counterparty_parameters: Some(CounterpartyChannelTransactionParameters { + pubkeys: ChannelPublicKeys { + funding_pubkey: PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice(&[44; 32]).unwrap(), + ), + revocation_basepoint: RevocationBasepoint::from( + PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice(&[45; 32]).unwrap(), + ), + ), + payment_point: PublicKey::from_secret_key( + &secp_ctx, + &SecretKey::from_slice(&[46; 32]).unwrap(), + ), + delayed_payment_basepoint: counterparty_commitment_params.counterparty_delayed_payment_base_key, + htlc_basepoint: counterparty_commitment_params.counterparty_htlc_base_key, + }, + selected_contest_delay: counterparty_commitment_params.on_counterparty_tx_csv, + }), + funding_outpoint: Some(funding_info.0), + channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + }; + keys.provide_channel_parameters(&channel_parameters); + let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let dummy_sig = crate::crypto::utils::sign(&secp_ctx, &secp256k1::Message::from_digest_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap()); + let dummy_tx_creation_keys = TxCreationKeys { + per_commitment_point: dummy_key.clone(), + revocation_key: RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(dummy_key), &dummy_key), + broadcaster_htlc_key: HtlcKey::from_basepoint(&secp_ctx, 
&HtlcBasepoint::from(dummy_key), &dummy_key), + countersignatory_htlc_key: HtlcKey::from_basepoint(&secp_ctx, &HtlcBasepoint::from(dummy_key), &dummy_key), + broadcaster_delayed_payment_key: DelayedPaymentKey::from_basepoint(&secp_ctx, &DelayedPaymentBasepoint::from(dummy_key), &dummy_key), + }; + let mut nondust_htlcs: Vec<(HTLCOutputInCommitment, Option>)> = Vec::new(); + let inner = CommitmentTransaction::new_with_auxiliary_htlc_data(0, 0, 0, dummy_key.clone(), dummy_key.clone(), dummy_tx_creation_keys, 0, &mut nondust_htlcs, &channel_parameters.as_counterparty_broadcastable()); + let holder_commitment = HolderCommitmentTransaction::new(inner, dummy_sig, Vec::new(), &dummy_key, &PublicKey::from_slice(&[2;33]).unwrap()); + + onchain_tx_handler = OnchainTxHandler::new(channel_value_satoshis, channel_keys_id, destination_script.clone(), keys, channel_parameters, holder_commitment, secp_ctx); + } + ReadUtilOpt::NotStub { entropy_source, signer_provider } => { + onchain_tx_handler = ReadableArgs::read( + reader, (entropy_source, signer_provider, channel_value_satoshis, channel_keys_id) + )?; + let cur_holder_value = onchain_tx_handler.get_cur_holder_commitment_to_self_value(); + if current_holder_commitment_tx.to_self_value_sat == u64::max_value() { + current_holder_commitment_tx.to_self_value_sat = cur_holder_value; + } else if current_holder_commitment_tx.to_self_value_sat != cur_holder_value { + return Err(DecodeError::InvalidValue); + } + + if let Some(prev_commitment_tx) = prev_holder_signed_commitment_tx.as_mut() { + let prev_holder_value = onchain_tx_handler.get_prev_holder_commitment_to_self_value(); + if prev_holder_value.is_none() { return Err(DecodeError::InvalidValue); } + if prev_commitment_tx.to_self_value_sat == u64::max_value() { + prev_commitment_tx.to_self_value_sat = prev_holder_value.unwrap(); + } else if prev_commitment_tx.to_self_value_sat != prev_holder_value.unwrap() { + return Err(DecodeError::InvalidValue); + } } } + } - // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the - // wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to - // give them a chance to recognize the spendable output. 
- if onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() && - counterparty_payment_script.is_p2wpkh() + let lockdown_from_offchain = Readable::read(reader)?; + let holder_tx_signed = Readable::read(reader)?; + + let mut funding_spend_confirmed = None; + let mut htlcs_resolved_on_chain = Some(Vec::new()); + let mut funding_spend_seen = Some(false); + let mut counterparty_node_id = None; + let mut confirmed_commitment_tx_counterparty_output = None; + let mut spendable_txids_confirmed = Some(Vec::new()); + let mut counterparty_fulfilled_htlcs = Some(new_hash_map()); + let mut initial_counterparty_commitment_info = None; + let mut balances_empty_height = None; + let mut channel_id = None; + let mut holder_pays_commitment_tx_fee = None; + read_tlv_fields!(reader, { + (1, funding_spend_confirmed, option), + (3, htlcs_resolved_on_chain, optional_vec), + (5, pending_monitor_events, optional_vec), + (7, funding_spend_seen, option), + (9, counterparty_node_id, option), + (11, confirmed_commitment_tx_counterparty_output, option), + (13, spendable_txids_confirmed, optional_vec), + (15, counterparty_fulfilled_htlcs, option), + (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), + (21, balances_empty_height, option), + (23, holder_pays_commitment_tx_fee, option), + }); + + // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both + // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`. + if let Some(ref mut pending_monitor_events) = pending_monitor_events { + if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) && + pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. })) { - let payment_point = onchain_tx_handler.channel_transaction_parameters.holder_pubkeys.payment_point; - counterparty_payment_script = - chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_p2wsh(); + pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_))); } + } - Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl { - latest_update_id, - commitment_transaction_number_obscure_factor, - - destination_script, - broadcasted_holder_revokable_script, - counterparty_payment_script, - shutdown_script, - - channel_keys_id, - holder_revocation_basepoint, - channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)), - funding_info, - current_counterparty_commitment_txid, - prev_counterparty_commitment_txid, - - counterparty_commitment_params, - funding_redeemscript, - channel_value_satoshis, - their_cur_per_commitment_points, - - on_holder_tx_csv, - - commitment_secrets, - counterparty_claimable_outpoints, - counterparty_commitment_txn_on_chain, - counterparty_hash_commitment_number, - counterparty_fulfilled_htlcs: counterparty_fulfilled_htlcs.unwrap(), - - prev_holder_signed_commitment_tx, - current_holder_commitment_tx, - current_counterparty_commitment_number, - current_holder_commitment_number, - - payment_preimages, - pending_monitor_events: pending_monitor_events.unwrap(), - pending_events, - is_processing_pending_events: false, - - onchain_events_awaiting_threshold_conf, - outputs_to_watch, - - onchain_tx_handler, + // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the + // wrong `counterparty_payment_script` was being tracked. 
Fix it now on deserialization to + // give them a chance to recognize the spendable output. + if onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() && + counterparty_payment_script.is_p2wpkh() + { + let payment_point = onchain_tx_handler.channel_transaction_parameters.holder_pubkeys.payment_point; + counterparty_payment_script = + chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_p2wsh(); + } - lockdown_from_offchain, - holder_tx_signed, - holder_pays_commitment_tx_fee, - funding_spend_seen: funding_spend_seen.unwrap(), - funding_spend_confirmed, - confirmed_commitment_tx_counterparty_output, - htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(), - spendable_txids_confirmed: spendable_txids_confirmed.unwrap(), + Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl { + latest_update_id, + commitment_transaction_number_obscure_factor, + + destination_script, + broadcasted_holder_revokable_script, + counterparty_payment_script, + shutdown_script, + + channel_keys_id, + holder_revocation_basepoint, + channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)), + funding_info, + current_counterparty_commitment_txid, + prev_counterparty_commitment_txid, + + counterparty_commitment_params, + funding_redeemscript, + channel_value_satoshis, + their_cur_per_commitment_points, + + on_holder_tx_csv, + + commitment_secrets, + counterparty_claimable_outpoints, + counterparty_commitment_txn_on_chain, + counterparty_hash_commitment_number, + counterparty_fulfilled_htlcs: counterparty_fulfilled_htlcs.unwrap(), + + prev_holder_signed_commitment_tx, + current_holder_commitment_tx, + current_counterparty_commitment_number, + current_holder_commitment_number, + + payment_preimages, + pending_monitor_events: pending_monitor_events.unwrap(), + pending_events, + is_processing_pending_events: false, + + onchain_events_awaiting_threshold_conf, + outputs_to_watch, + + onchain_tx_handler, + + lockdown_from_offchain, + holder_tx_signed, + holder_pays_commitment_tx_fee, + funding_spend_seen: funding_spend_seen.unwrap(), + funding_spend_confirmed, + confirmed_commitment_tx_counterparty_output, + htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(), + spendable_txids_confirmed: spendable_txids_confirmed.unwrap(), + + best_block, + counterparty_node_id, + initial_counterparty_commitment_info, + balances_empty_height, + }))) +} - best_block, - counterparty_node_id, - initial_counterparty_commitment_info, - balances_empty_height, - }))) +impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> + for (BlockHash, ChannelMonitor) { + fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { + let (entropy_source, signer_provider) = args; + read_util(reader, ReadUtilOpt::NotStub { entropy_source, signer_provider }) } } diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 4a179f43985..24748a096b3 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -2574,6 +2574,23 @@ pub enum MessageSendEvent { /// The gossip_timestamp_filter which should be sent. msg: msgs::GossipTimestampFilter, }, + /// Sends a channel partner Peer Storage of our backup which they should store. + /// This should be sent on each new connection to the channel partner or whenever we want + /// them to update the backup that they store. + SendPeerStorageMessage { + /// The node_id of this message recipient + node_id: PublicKey, + /// The PeerStorageMessage which should be sent. 
+ msg: msgs::PeerStorageMessage, + }, + /// Sends a channel partner their own peer storage which we store and update when they send + /// a [`msgs::PeerStorageMessage`]. + SendYourPeerStorageMessage { + /// The node_id of this message recipient + node_id: PublicKey, + /// The YourPeerStorageMessage which should be sent. + msg: msgs::YourPeerStorageMessage, + } } /// A trait indicating an object may generate message send events diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index d099e439ae5..776eadab779 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -1466,6 +1466,7 @@ fn route_blinding_spec_test_vector() { fn sign_invoice( &self, _invoice: &RawBolt11Invoice, _recipient: Recipient, ) -> Result { unreachable!() } + fn get_peer_storage_key(&self) -> [u8;32] { unreachable!() } fn sign_bolt12_invoice_request( &self, _invoice_request: &UnsignedInvoiceRequest, ) -> Result { unreachable!() } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 250704ea2a7..b31bb8e99f3 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2074,6 +2074,10 @@ impl ChannelContext where SP::Target: SignerProvider { self.update_time_counter } + pub fn get_commitment_secret(&self) -> CounterpartyCommitmentSecrets { + self.commitment_secrets.clone() + } + pub fn get_latest_monitor_update_id(&self) -> u64 { self.latest_monitor_update_id } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bdf67b5da7c..85d39891d53 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -40,13 +40,14 @@ use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt1 use crate::chain; use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock}; use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; +use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, WithChannelMonitor, ANTI_REORG_DELAY, CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, STUB_CHANNEL_UPDATE_IDENTIFIER}; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::events; use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. 
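The two new `MessageSendEvent` variants above are pumped to the counterparty exactly like the existing channel-message events. As a rough sketch — `node_a`/`node_b` and the `*_id` keys are hypothetical handles standing in for the test nodes used later in this diff, not a fixed API:

```rust
// Sketch only: deliver peer-storage events from node_a's queue to node_b's handlers.
for event in node_a.get_and_clear_pending_msg_events() {
	match event {
		// node_a asks node_b to store (an encrypted copy of) node_a's backup.
		MessageSendEvent::SendPeerStorageMessage { node_id, msg } => {
			assert_eq!(node_id, node_b_id);
			node_b.handle_peer_storage(node_a_id, &msg);
		},
		// node_a returns the blob node_b previously asked it to store.
		MessageSendEvent::SendYourPeerStorageMessage { node_id, msg } => {
			assert_eq!(node_id, node_b_id);
			node_b.handle_your_peer_storage(node_a_id, &msg);
		},
		_ => {}, // All other variants are routed as before.
	}
}
```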
use crate::ln::inbound_payment; +use crate::ln::our_peer_storage::OurPeerStorage; use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext}; use crate::ln::channel_state::ChannelDetails; @@ -76,8 +77,8 @@ use crate::offers::static_invoice::StaticInvoice; use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler}; use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; -use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider}; use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider}; use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate}; use crate::util::wakers::{Future, Notifier}; use crate::util::scid_utils::fake_scid; @@ -1168,9 +1169,24 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider { /// [`ChannelMessageHandler::peer_connected`] and no corresponding /// [`ChannelMessageHandler::peer_disconnected`]. pub is_connected: bool, + /// Holds the peer storage data for the channel partner on a per-peer basis. + peer_storage: Vec<u8>, } impl<SP: Deref> PeerState<SP> where SP::Target: SignerProvider { + pub fn new(features: &InitFeatures) -> Self { + Self { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: features.clone(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + is_connected: true, + peer_storage: Vec::new(), + } + } /// Indicates that a peer meets the criteria where we're ok to remove it from our storage. /// If true is passed for `require_disconnected`, the function will return false if we haven't /// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`. @@ -7001,6 +7017,7 @@ where monitor_update_blocked_actions: BTreeMap::new(), actions_blocking_raa_monitor_updates: BTreeMap::new(), is_connected: false, + peer_storage: Vec::new(), })); let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -7861,6 +7878,97 @@ where } } + fn internal_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = match per_peer_state.get(counterparty_node_id) { + Some(peer_state_mutex) => peer_state_mutex, + None => return, + }; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None); + + // Check that we have at least one funded channel with this peer (currently we only provide this service to peers we have a channel with). + if !peer_state.channel_by_id.values().any(|phase| matches!(phase, ChannelPhase::Funded(_))) { + log_debug!(logger, "We do not have any channel with {}", log_pubkey!(counterparty_node_id)); + return; + } + + #[cfg(not(test))] + if msg.data.len() > 1024 { + log_debug!(logger, "We do not allow more than 1 KiB of data for each peer in peer storage. 
Sending warning to peer {}", log_pubkey!(counterparty_node_id)); + peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: counterparty_node_id.clone(), + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: ChannelId([0; 32]), + data: "Supports only data up to 1 KiB in peer storage.".to_owned() + }, + log_level: Level::Trace, + } + }); + return; + } + + log_trace!(logger, "Received Peer Storage from {}", log_pubkey!(counterparty_node_id)); + peer_state.peer_storage = msg.data.clone(); + } + + fn internal_your_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::YourPeerStorageMessage) { + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None); + if msg.data.len() < 16 { + log_debug!(logger, "Invalid YourPeerStorage received from {}", log_pubkey!(counterparty_node_id)); + return; + } + + let mut res = vec![0; msg.data.len() - 16]; + let our_peerstorage_encryption_key = self.node_signer.get_peer_storage_key(); + let mut cyphertext_with_key = Vec::with_capacity(msg.data.len() + our_peerstorage_encryption_key.len()); + cyphertext_with_key.extend(msg.data.clone()); + cyphertext_with_key.extend_from_slice(&our_peerstorage_encryption_key); + + match OurPeerStorage::decrypt_our_peer_storage(&mut res, cyphertext_with_key.as_slice()) { + Ok(()) => { + // Decryption successful, the plaintext is now stored in `res`. + log_debug!(logger, "Received a peer storage from peer {}", log_pubkey!(counterparty_node_id)); + } + Err(_) => { + log_debug!(logger, "Invalid YourPeerStorage received from {}", log_pubkey!(counterparty_node_id)); + return; + } + } + + let our_peer_storage = ::read(&mut ::bitcoin::io::Cursor::new(res)).unwrap(); + let per_peer_state = self.per_peer_state.read().unwrap(); + + for ((node_id, channel_id), min_seen_secret) in our_peer_storage.get_cid_and_min_seen_secret().unwrap() { + let peer_state_mutex = match per_peer_state.get(&node_id) { + Some(mutex) => mutex, + None => { + log_debug!(logger, "Not able to find peer_state for the counterparty {}, channelId {}", log_pubkey!(node_id), channel_id); + continue; + } + }; + + let peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &*peer_state_lock; + + match peer_state.channel_by_id.get(&channel_id) { + Some(ChannelPhase::Funded(chan)) => { + if chan.context.get_commitment_secret().get_min_seen_secret() > min_seen_secret { + panic!("Lost channel state for channel {}. + Received peer storage with a more recent state than what our node had. 
+ Use the FundRecoverer to initiate a force close and sweep the funds.", channel_id); + } + }, + Some(_) => {} + None => { + continue; + } + } + } + } + fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> { let best_block = *self.best_block.read().unwrap(); let per_peer_state = self.per_peer_state.read().unwrap(); @@ -10471,6 +10579,16 @@ where let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id); } + fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: &msgs::PeerStorageMessage) { + let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); + self.internal_peer_storage(&counterparty_node_id, msg); + } + + fn handle_your_peer_storage(&self, counterparty_node_id: PublicKey, msg: &msgs::YourPeerStorageMessage) { + let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents); + self.internal_your_peer_storage(&counterparty_node_id, msg); + } + fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) { // Note that we never need to persist the updated ChannelManager for an inbound // channel_ready message - while the channel's state will change, any channel_ready message @@ -10736,6 +10854,10 @@ where &events::MessageSendEvent::SendShortIdsQuery { .. } => false, &events::MessageSendEvent::SendReplyChannelRange { .. } => false, &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, + + // Peer Storage + &events::MessageSendEvent::SendPeerStorageMessage { .. } => false, + &events::MessageSendEvent::SendYourPeerStorageMessage { .. } => false, } }); debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); @@ -10787,6 +10909,7 @@ where monitor_update_blocked_actions: BTreeMap::new(), actions_blocking_raa_monitor_updates: BTreeMap::new(), is_connected: true, + peer_storage: Vec::new(), })); }, hash_map::Entry::Occupied(e) => { @@ -10816,6 +10939,16 @@ where let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; + if !peer_state.peer_storage.is_empty() { + pending_msg_events.push(events::MessageSendEvent::SendYourPeerStorageMessage { + node_id: counterparty_node_id.clone(), + msg: msgs::YourPeerStorageMessage { + data: peer_state.peer_storage.clone() + }, + }); + } + + for (_, phase) in peer_state.channel_by_id.iter_mut() { match phase { ChannelPhase::Funded(chan) => { @@ -11415,6 +11548,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features.set_scid_privacy_optional(); features.set_zero_conf_optional(); features.set_route_blinding_optional(); + features.set_provide_peer_backup_storage_optional(); if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx { features.set_anchors_zero_fee_htlc_tx_optional(); } @@ -11900,6 +12034,12 @@ where if !peer_state.ok_to_remove(false) { peer_pubkey.write(writer)?; peer_state.latest_features.write(writer)?; + + (peer_state.peer_storage.len() as u64).write(writer)?; + for p in peer_state.peer_storage.iter() { + p.write(writer)?; + } + if !peer_state.monitor_update_blocked_actions.is_empty() { monitor_update_blocked_actions_per_peer .get_or_insert_with(Vec::new) @@ -12255,6 +12395,10 @@ where funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id()); funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = 
args.channel_monitors.get_mut(&funding_txo) { + if monitor.get_latest_update_id() == STUB_CHANNEL_UPDATE_IDENTIFIER { + panic!("ChannelMonitor for {} is stale and recovered from Peer Storage, it is not safe to run the node in normal mode.", monitor.channel_id()); + } + if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || @@ -12415,6 +12559,7 @@ where monitor_update_blocked_actions: BTreeMap::new(), actions_blocking_raa_monitor_updates: BTreeMap::new(), is_connected: false, + peer_storage: Vec::new(), } }; @@ -12425,6 +12570,15 @@ where let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map()); let mut peer_state = peer_state_from_chans(peer_chans); peer_state.latest_features = Readable::read(reader)?; + + let peer_storage_count:u64 = Readable::read(reader)?; + let mut peer_storage: Vec = Vec::with_capacity(cmp::min(peer_storage_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); + for i in 0..peer_storage_count { + let x = Readable::read(reader)?; + peer_storage.insert(i as usize, x); + } + peer_state.peer_storage = peer_storage; + per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7776966b285..6743664138c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -854,6 +854,12 @@ macro_rules! get_htlc_update_msgs { /// such messages are intended to all peers. pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &mut Vec) -> MessageSendEvent { let ev_index = msg_events.iter().position(|e| { match e { + MessageSendEvent::SendPeerStorageMessage { node_id, .. } => { + node_id == msg_node_id + }, + MessageSendEvent::SendYourPeerStorageMessage { node_id, .. } => { + node_id == msg_node_id + }, MessageSendEvent::SendAcceptChannel { node_id, .. } => { node_id == msg_node_id }, @@ -3568,6 +3574,12 @@ macro_rules! get_chan_reestablish_msgs { } else if let MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, .. 
} = msg { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); announcements.insert(msg.contents.short_channel_id); + } else if let MessageSendEvent::SendPeerStorageMessage { ref node_id, ref msg } = msg { + $dst_node.node.handle_peer_storage($src_node.node.get_our_node_id(), msg); + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + } else if let MessageSendEvent::SendYourPeerStorageMessage { ref node_id, ref msg } = msg { + $dst_node.node.handle_your_peer_storage($src_node.node.get_our_node_id(), msg); + assert_eq!(*node_id, $dst_node.node.get_our_node_id()); } else { panic!("Unexpected event") } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 31346c6b78b..d6eee70d5e8 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -14,17 +14,19 @@ use crate::chain; use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; -use crate::chain::channelmonitor; +use crate::chain::chainmonitor::Persist; +use crate::chain::{channelmonitor, BestBlock}; use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; +use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider, SpendableOutputDescriptor}; use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::types::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; -use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{self, ChainParameters, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; +use crate::ln::fundrecoverer::{FundRecoverer, RecoveryEvent}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; @@ -174,6 +176,166 @@ fn test_funding_exceeds_no_wumbo_limit() { } } +#[test] +fn test_peer_storage() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister, chain_monitor); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_0_deserialized; + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let nodes_0_serialized = nodes[0].node.encode(); + + let (_a, _b, _cid, _funding_tx) = 
create_announced_chan_between_nodes(&nodes, 0, 1); + + let msg_events_a = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_a { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + + let msg_events_b = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_b { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + + send_payment(&nodes[0], &vec!(&nodes[1])[..], 1000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 10000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 9999); + + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + // Reconnect peers + nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + // Let's drop the monitor and clear the chain_monitor as well. 
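The reload below wipes nodes[0]'s `ChannelManager` and monitor state, so recovery will hinge entirely on the encrypted blob nodes[1] has been storing. For reference, a minimal sketch of the decryption that `decrypt_our_peer_storage` (added later in this diff) performs, written against the RustCrypto `chacha20poly1305` crate purely for illustration; the assumed layout is `ciphertext || 16-byte Poly1305 tag`, an all-zero 12-byte nonce (counter `n = 0`), and empty AAD:

```rust
use chacha20poly1305::aead::Aead;
use chacha20poly1305::{ChaCha20Poly1305, Key, KeyInit, Nonce};

// `key` would be `NodeSigner::get_peer_storage_key()`; `data` is the retrieved blob.
fn decrypt_peer_storage_blob(key: &[u8; 32], data: &[u8]) -> Result<Vec<u8>, ()> {
	if data.len() < 16 {
		return Err(()); // Too short to even hold the Poly1305 tag.
	}
	let cipher = ChaCha20Poly1305::new(Key::from_slice(key));
	// The `aead` API expects `ciphertext || tag`, matching the layout above.
	cipher.decrypt(Nonce::from_slice(&[0u8; 12]), data).map_err(|_| ())
}
```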
+ nodes[0].chain_source.clear_watched_txn_and_outputs(); + reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[], persister, chain_monitor, nodes_0_deserialized); + let persister: &dyn Persist = &chanmon_cfgs[0].persister; + + let fundrecoverer + = FundRecoverer::new(node_cfgs[0].keys_manager, node_cfgs[0].logger, test_default_channel_config(), ChainParameters {network: Network::Testnet, + best_block: BestBlock::from_network(Network::Testnet)}, node_cfgs[0].keys_manager, node_cfgs[0].keys_manager, Some(&chanmon_cfgs[0].chain_source), + persister, node_cfgs[0].fee_estimator, node_cfgs[0].tx_broadcaster, Vec::new()); + + fundrecoverer.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + // 0th - SendYourPeerStorageMessage + // 1st - SendChannelReestablish + assert_eq!(msg_events.len(), 2); + for msg in msg_events { + if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { + fundrecoverer.handle_channel_reestablish(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } else if let MessageSendEvent::SendYourPeerStorageMessage { ref node_id, ref msg } = msg { + fundrecoverer.handle_your_peer_storage(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } else { + panic!("Unexpected event") + } + } + + let recovery_event = fundrecoverer.get_and_clear_recovery_pending_events(); + assert_eq!(recovery_event.len(), 1); + match recovery_event[0] { + RecoveryEvent::RescanBlock { .. } => {}, + }; + + let bogus_chan_reestablish = fundrecoverer.get_and_clear_pending_msg_events(); + // We receive two bogus `channel_reestablish` messages: the first from `handle_your_peer_storage` and the second from `handle_channel_reestablish`. + assert_eq!(bogus_chan_reestablish.len(), 2); + match bogus_chan_reestablish[0] { + MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), msg); + }, + _ => panic!("Unexpected event"), + } + + let commitment_tx = { + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + node_txn.remove(0) + }; + + let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx.clone()]); + connect_block(&nodes[1], &block); + // Since the fundrecoverer stands in for `chain::Watch` here, the test feeds it block data directly. 
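The test drains the resulting `RescanBlock` event with the `get_and_clear_recovery_pending_events` helper; outside of tests the same drain happens through the crate-internal `process_pending_recovery_events`, using any `RecoveryHandler` (a blanket impl for `Fn(RecoveryEvent) -> Result<(), ReplayEvent>` closures is added in fundrecoverer.rs below). A sketch of that pattern, where `rescan_chain_from` is a hypothetical callback into the node's block source:

```rust
let handler = |event: RecoveryEvent| -> Result<(), ReplayEvent> {
	match event {
		RecoveryEvent::RescanBlock { rescan_from } => {
			// Replay blocks from `rescan_from` so the freshly inserted stub
			// monitor can discover an already-confirmed commitment transaction.
			rescan_chain_from(rescan_from);
			Ok(())
		},
	}
};
fund_recoverer.process_pending_recovery_events(&handler);
```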
+ let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + let height = nodes[0].best_block_info().1 + 1; + fundrecoverer.best_block_updated(&block.header, height); + fundrecoverer.transactions_confirmed(&block.header, &txdata, height); + + check_closed_broadcast!(nodes[1], true); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::ChannelClosed {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + let mut dummy_block = create_dummy_block(nodes[1].best_block_hash(), height, Vec::new()); + for i in 1..CHAN_CONFIRM_DEPTH { + let prev_blockhash = dummy_block.header.block_hash(); + let dummy_txdata: Vec<_> = dummy_block.txdata.iter().enumerate().collect(); + fundrecoverer.best_block_updated(&dummy_block.header, height + i + 1); + fundrecoverer.transactions_confirmed(&dummy_block.header, &dummy_txdata, height + i + 1); + dummy_block = create_dummy_block(prev_blockhash, height + i + 1, Vec::new()); + } + + // Clearing chain source so that the `drop` doesn't panic. + nodes[0].chain_source.clear_watched_txn_and_outputs(); + + check_added_monitors!(nodes[1], 1); + + for event in fundrecoverer.get_and_clear_pending_events() { + match event { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { + for outp in outputs.drain(..) { + match outp { + SpendableOutputDescriptor::StaticPaymentOutput(static_payment) => { + assert_eq!(static_payment.output.value.to_sat(), commitment_tx.output[0].value.to_sat()); + }, + _ => panic!("Unexpected event"), + } + } + }, + _ => panic!("Unexpected event"), + }; + } +} + fn do_test_counterparty_no_reserve(send_from_initiator: bool) { // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, // but only for them. Because some LSPs do it with some level of trust of the clients (for a @@ -4541,6 +4703,201 @@ macro_rules! 
check_spendable_outputs { } } +#[test] +fn test_peer_storage_on_revoked_txn() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister, chain_monitor); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes_0_deserialized; + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let nodes_0_serialized = nodes[0].node.encode(); + + let (_a, _b, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 1, 0); + + let msg_events_a = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_a { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + + let msg_events_b = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_b { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 10000); + send_payment(&nodes[1], &vec!(&nodes[0])[..], 10000); + + let revoked_local_txn = get_local_commitment_txn!(nodes[1], channel_id); + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, funding_tx.compute_txid()); + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 10000); + + connect_blocks(&nodes[1], 2); + connect_blocks(&nodes[0], 2); + + let msg_events_aa = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_aa { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + let msg_events_bb = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events(); + for msg in msg_events_bb { + if let MessageSendEvent::SendPeerStorageMessage { node_id: _, ref msg } = msg { + nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg); + } else { + panic!("Unexpected event"); + } + } + + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + // Reconnect peers + nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + // Lets drop the monitor and clear the chain_monitor 
as well. + nodes[0].chain_source.clear_watched_txn_and_outputs(); + reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[], persister, chain_monitor, nodes_0_deserialized); + let persister: &dyn Persist = &chanmon_cfgs[0].persister; + + let fundrecoverer + = FundRecoverer::new(node_cfgs[0].keys_manager, node_cfgs[0].logger, test_default_channel_config(), ChainParameters {network: Network::Testnet, + best_block: BestBlock::from_network(Network::Testnet)}, node_cfgs[0].keys_manager, node_cfgs[0].keys_manager, Some(&chanmon_cfgs[0].chain_source), + persister, node_cfgs[0].fee_estimator, node_cfgs[0].tx_broadcaster, Vec::new()); + + fundrecoverer.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + + nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + // 0th - SendYourPeerStorageMessage + // 1st - SendChannelReestablish + assert_eq!(msg_events.len(), 2); + for msg in msg_events { + if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { + fundrecoverer.handle_channel_reestablish(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } else if let MessageSendEvent::SendYourPeerStorageMessage { ref node_id, ref msg } = msg { + fundrecoverer.handle_your_peer_storage(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + } else { + panic!("Unexpected event") + } + } + + let recovery_event = fundrecoverer.get_and_clear_recovery_pending_events(); + assert_eq!(recovery_event.len(), 1); + match recovery_event[0] { + RecoveryEvent::RescanBlock { .. } => {}, + }; + + let bogus_chan_reestablish = fundrecoverer.get_and_clear_pending_msg_events(); + + // We receive two bogus `channel_reestablish` messages: the first from `handle_your_peer_storage` and the second from `handle_channel_reestablish`. + assert_eq!(bogus_chan_reestablish.len(), 2); + + match bogus_chan_reestablish[0] { + MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), msg); + }, + _ => panic!("Unexpected event"), + } + + let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]); + connect_block(&nodes[1], &block); + // Since the fundrecoverer stands in for `chain::Watch` here, the test feeds it block data directly. 
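Before the on-chain action below, it is worth restating the per-channel decision the recoverer makes when replaying a backup (see `stale_or_missing_channel_monitor` in fundrecoverer.rs later in this diff): commitment-secret indices count down from 2^48 - 1, so a smaller `min_seen_secret` means more (newer) states have been seen. A self-contained sketch of that decision, with hypothetical names:

```rust
// Sketch mirroring `stale_or_missing_channel_monitor` later in this diff.
fn monitor_health(local_min_seen: Option<u64>, backup_min_seen: u64) -> &'static str {
	match local_min_seen {
		// No monitor for this funding outpoint: rebuild a stub from peer storage.
		None => "missing",
		// Our monitor has seen fewer secrets than the backup claims: we lost state.
		Some(local) if local > backup_min_seen => "stale",
		// The local monitor is at least as current as the backup.
		Some(_) => "healthy",
	}
}
```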
+ let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + let height = nodes[0].best_block_info().1 + 1; + + nodes[0].blocks.lock().unwrap().push((block.clone(), height)); + fundrecoverer.best_block_updated(&block.header, height); + fundrecoverer.transactions_confirmed(&block.header, &txdata, height); + + check_closed_broadcast!(nodes[1], true); + + let events_2 = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::ChannelClosed {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[1], 1); + + let penalty = node_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + assert_eq!(penalty.len(), 1); + assert_eq!(penalty[0].input.len(), 1); + + let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![penalty[0].clone()]); + let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); + connect_block(&nodes[1], &block); + + nodes[0].blocks.lock().unwrap().push((block.clone(), height + 1)); + fundrecoverer.best_block_updated(&block.header, height + 1); + fundrecoverer.transactions_confirmed(&block.header, &txdata, height + 1); + + let mut dummy_block = create_dummy_block(nodes[1].best_block_hash(), height, Vec::new()); + for i in 1..CHAN_CONFIRM_DEPTH { + let prev_blockhash = dummy_block.header.block_hash(); + let dummy_txdata: Vec<_> = dummy_block.txdata.iter().enumerate().collect(); + fundrecoverer.best_block_updated(&dummy_block.header, height + i + 1); + fundrecoverer.transactions_confirmed(&dummy_block.header, &dummy_txdata, height + i + 1); + dummy_block = create_dummy_block(prev_blockhash, height + i + 1, Vec::new()); + } + + // Clear the chain source so that the `drop` doesn't panic. + nodes[0].chain_source.clear_watched_txn_and_outputs(); + + for event in fundrecoverer.get_and_clear_pending_events() { + match event { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { + for outp in outputs.drain(..) 
{ + match outp { + SpendableOutputDescriptor::StaticOutput { output, .. } => { + assert_eq!(output.value.to_sat(), penalty[0].output[0].value.to_sat()); + }, + _ => panic!("Unexpected event"), + } + } + }, + _ => panic!("Unexpected event"), + }; + } +} + #[test] fn test_claim_sizeable_push_msat() { // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx diff --git a/lightning/src/ln/fundrecoverer.rs b/lightning/src/ln/fundrecoverer.rs new file mode 100644 index 00000000000..253a3218220 --- /dev/null +++ b/lightning/src/ln/fundrecoverer.rs @@ -0,0 +1,887 @@ +use bitcoin::constants::ChainHash; + +use crate::chain::{self}; +use crate::events::{MessageSendEvent, MessageSendEventsProvider}; +use crate::ln::channelmanager::{ + provided_init_features, provided_node_features, ChainParameters, PeerState, +}; +use crate::ln::features::{InitFeatures, NodeFeatures}; +use crate::ln::msgs; +use crate::ln::msgs::{ChannelMessageHandler, DecodeError}; +use crate::ln::script::ShutdownScript; +use crate::ln::types::ChannelId; +use bitcoin::block::Header; +use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::{secp256k1, ScriptBuf}; +use bitcoin::secp256k1::{Secp256k1, PublicKey}; + +use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use crate::chain::chainmonitor::{ + process_chain_data_util, LockedChannelMonitor, MonitorHolder, Persist, +}; +use crate::chain::channelmonitor::{ + ChannelMonitor, TransactionOutputs, STUB_CHANNEL_UPDATE_IDENTIFIER, + read_util, ReadUtilOpt, get_stub_channel_info_from_ser_channel +}; +use crate::chain::transaction::{OutPoint, TransactionData}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus}; +use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC; + +use crate::events::{self, EventHandler, ReplayEvent}; + +use crate::ln::our_peer_storage::OurPeerStorage; +use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::sign::{EntropySource, NodeSigner, SignerProvider}; +use crate::sync::RwLock; +use crate::util::config::UserConfig; +use crate::util::logger::{Logger, WithContext}; +use crate::util::ser::Readable; +use crate::util::wakers::Notifier; +use core::sync::atomic::AtomicUsize; + +use crate::prelude::*; +use crate::sync::{Arc, FairRwLock, Mutex}; +use core::cell::RefCell; +use core::ops::Deref; + +pub use crate::ln::outbound_payment::{ + Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, RecipientOnionFields, Retry, + RetryableSendFailure, +}; + +/// Represents events related to recovering channel funds. +/// +/// This enum defines the types of recovery actions required to restore channel +/// functionality or recover funds. It is primarily used during offline operation +/// or when reinitializing channel monitors. +/// +/// # Variants +/// +/// - `RescanBlock`: +/// Triggers a blockchain rescan starting from a specific block to identify +/// relevant transactions for channel recovery. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum RecoveryEvent { + /// `rescan_from`: The [`BestBlock`] indicating the starting point for the rescan. + RescanBlock { + rescan_from: BestBlock + }, +} + +/// A trait for handling recovery-related events during [`ChannelMonitor`] restoration. +/// +/// Implementations of this trait define how to process specific [`RecoveryEvent`]s, +/// which typically arise during reinitialization of a [`ChannelMonitor`] from peer storage. 
+/// + /// # Required Method + /// + /// `handle_recovery_event`: Handles a given [`RecoveryEvent`] and determines the appropriate + /// actions, such as rescanning blocks or processing replay events. +pub trait RecoveryHandler { + fn handle_recovery_event(&self, event: RecoveryEvent) -> Result<(), ReplayEvent>; +} + +impl<F> RecoveryHandler for F where F: Fn(RecoveryEvent) -> Result<(), ReplayEvent> { + fn handle_recovery_event(&self, event: RecoveryEvent) -> Result<(), ReplayEvent> { + self(event) + } +} + +impl<T: RecoveryHandler> RecoveryHandler for Arc<T> { + fn handle_recovery_event(&self, event: RecoveryEvent) -> Result<(), ReplayEvent> { + self.deref().handle_recovery_event(event) + } +} + +/// A utility for recovering funds from channels in scenarios where a node operates in offline mode. +/// +/// This works as a mock [`ChannelMessageHandler`]; it is mainly used when a user wants to run their node in +/// offline mode, i.e. the node won't communicate with any peer except to send a bogus `channel_reestablish` +/// for each of the stub [`ChannelMonitor`]s being tracked by the [`ChainMonitor`]. +/// +/// [`FundRecoverer`] is parameterized by a number of components to achieve this. +/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each +/// channel +/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels +/// - [`Logger`] for logging operational information of varying degrees +/// +/// Additionally, it implements the following traits: +/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers +/// - [`MessageSendEventsProvider`] to similarly send such messages to peers +/// +pub struct FundRecoverer< + ChannelSigner: EcdsaChannelSigner, + C: Deref, + SP: Deref, + L: Deref, + NS: Deref, + ES: Deref, + P: Deref, + T: Deref, + F: Deref, +> where + SP::Target: SignerProvider, + NS::Target: NodeSigner, + L::Target: Logger, + ES::Target: EntropySource, + C::Target: chain::Filter, + P::Target: Persist<ChannelSigner>, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, +{ + default_configuration: UserConfig, + secp_ctx: Secp256k1<secp256k1::All>, + entropy_source: ES, + chain_source: Option<C>, + persister: P, + broadcaster: T, + fee_estimator: F, + + monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>, + + highest_chain_height: AtomicUsize, + signer_provider: SP, + node_signer: NS, + chain_hash: ChainHash, + /// The key used to encrypt our peer storage that would be sent to our peers. + our_peerstorage_encryption_key: [u8; 32], + per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>, + + #[cfg(test)] + pub(super) best_block: RwLock<BestBlock>, + #[cfg(not(test))] + best_block: RwLock<BestBlock>, + + pending_events: Mutex<Vec<RecoveryEvent>>, + /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for + /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process). + event_notifier: Notifier, + + logger: L, +} + +impl< + ChannelSigner: EcdsaChannelSigner, + C: Deref, + SP: Deref, + L: Deref, + NS: Deref, + ES: Deref, + P: Deref, + T: Deref, + F: Deref, + > events::EventsProvider for FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F> +where + SP::Target: SignerProvider, + NS::Target: NodeSigner, + L::Target: Logger, + ES::Target: EntropySource, + C::Target: chain::Filter, + P::Target: Persist<ChannelSigner>, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, +{ + /// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity. 
+ /// + /// For channels featuring anchor outputs, this method will also process [`BumpTransaction`] + /// events produced from each [`ChannelMonitor`] while there is a balance to claim onchain + /// within each channel. As the confirmation of a commitment transaction may be critical to the + /// safety of funds, we recommend invoking this every 30 seconds, or lower if running in an + /// environment with spotty connections, like on mobile. + /// + /// An [`EventHandler`] may safely call back to the provider, though this shouldn't be needed in + /// order to handle these events. + /// + /// [`SpendableOutputs`]: events::Event::SpendableOutputs + /// [`BumpTransaction`]: events::Event::BumpTransaction + fn process_pending_events<H: Deref>(&self, handler: H) + where + H::Target: EventHandler, + { + for monitor_state in self.monitors.read().unwrap().values() { + match monitor_state.monitor.process_pending_events(&handler) { + Ok(()) => {}, + Err(ReplayEvent()) => { + self.event_notifier.notify(); + }, + } + } + } +} + +impl< + ChannelSigner: EcdsaChannelSigner, + C: Deref, + SP: Deref, + L: Deref, + NS: Deref, + ES: Deref, + P: Deref, + T: Deref, + F: Deref, + > FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F> +where + SP::Target: SignerProvider, + NS::Target: NodeSigner, + L::Target: Logger, + ES::Target: EntropySource, + C::Target: chain::Filter, + P::Target: Persist<ChannelSigner>, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, +{ + fn process_pending_recovery_events<RH: Deref>(&self, handler: RH) + where + RH::Target: RecoveryHandler, + { + let mut events = self.pending_events.lock().unwrap(); + for event in events.drain(..) { + match handler.handle_recovery_event(event) { + Ok(()) => {}, + Err(ReplayEvent()) => { + self.event_notifier.notify(); + }, + } + } + } +} + +impl< + ChannelSigner: EcdsaChannelSigner, + C: Deref, + SP: Deref, + L: Deref, + NS: Deref, + ES: Deref, + P: Deref, + T: Deref, + F: Deref, + > MessageSendEventsProvider for FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F> +where + SP::Target: SignerProvider, + NS::Target: NodeSigner, + L::Target: Logger, + ES::Target: EntropySource, + C::Target: chain::Filter, + P::Target: Persist<ChannelSigner>, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, +{ + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { + let mut pending_events = Vec::new(); + let events = RefCell::new(Vec::new()); + let per_peer_state = self.per_peer_state.read().unwrap(); + for (_cp_id, peer_state_mutex) in per_peer_state.iter() { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + if peer_state.pending_msg_events.len() > 0 { + pending_events.append(&mut peer_state.pending_msg_events); + } + } + if !pending_events.is_empty() { + events.replace(pending_events); + } + events.into_inner() + } +} + +impl< + ChannelSigner: EcdsaChannelSigner, + C: Deref, + SP: Deref, + L: Deref, + NS: Deref, + ES: Deref, + P: Deref, + T: Deref, + F: Deref, + > FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F> +where + SP::Target: SignerProvider, + NS::Target: NodeSigner, + L::Target: Logger, + ES::Target: EntropySource, + C::Target: chain::Filter, + P::Target: Persist<ChannelSigner>, + T::Target: BroadcasterInterface, + F::Target: FeeEstimator, +{ + /// Creates a new instance of `FundRecoverer`. + /// This initializes the `FundRecoverer` with the provided signers, chain source, persister, + /// broadcaster, fee estimator, configuration, and chain parameters; its chain hash is + /// derived from the genesis block of the specified network. 
+ pub fn new( + node_signer: NS, logger: L, config: UserConfig, params: ChainParameters, + signer_provider: SP, entropy_source: ES, chain_source: Option<C>, persister: P, + fee_estimator: F, broadcaster: T, monitors: Vec<ChannelMonitor<ChannelSigner>>, + ) -> Self { + let our_peerstorage_encryption_key = node_signer.get_peer_storage_key(); + let mut secp_ctx = Secp256k1::new(); + let mut monitor_map = new_hash_map(); + for monitor in monitors { + let entry = match monitor_map.entry(monitor.get_funding_txo().0) { + hash_map::Entry::Occupied(_) => { + continue; + }, + hash_map::Entry::Vacant(e) => e, + }; + + if let Some(ref chain_source) = chain_source { + monitor.load_outputs_to_watch(chain_source, &logger); + } + + entry + .insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(Vec::new()) }); + } + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); + Self { + default_configuration: config.clone(), + monitors: RwLock::new(monitor_map), + persister, + fee_estimator, + broadcaster, + chain_source, + signer_provider, + entropy_source, + secp_ctx, + highest_chain_height: AtomicUsize::new(0), + best_block: RwLock::new(params.best_block), + node_signer, + our_peerstorage_encryption_key, + pending_events: Mutex::new(Vec::new()), + event_notifier: Notifier::new(), + chain_hash: ChainHash::using_genesis_block(params.network), + per_peer_state: FairRwLock::new(new_hash_map()), + logger, + } + } + + #[cfg(any(test, feature = "_test_utils"))] + pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> { + use crate::events::EventsProvider; + let events = core::cell::RefCell::new(Vec::new()); + let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event)); + self.process_pending_events(&event_handler); + events.into_inner() + } + + #[cfg(any(test, feature = "_test_utils"))] + pub fn get_and_clear_recovery_pending_events(&self) -> Vec<RecoveryEvent> { + let events = core::cell::RefCell::new(Vec::new()); + let event_handler = |event: RecoveryEvent| Ok(events.borrow_mut().push(event)); + self.process_pending_recovery_events(&event_handler); + events.into_inner() + } + + /// Decrypts `OurPeerStorage` using our peer-storage encryption key; the resulting plaintext is stored inside `res`. + /// Returns an error if the `cyphertext` fails authentication. + fn decrypt_our_peer_storage(&self, res: &mut [u8], cyphertext: &[u8]) -> Result<(), ()> { + let key = self.our_peerstorage_encryption_key; + let n = 0u64; + + let mut nonce = [0; 12]; + nonce[4..].copy_from_slice(&n.to_le_bytes()[..]); + + let mut chacha = ChaCha20Poly1305RFC::new(&key, &nonce, b""); + if chacha + .variable_time_decrypt( + &cyphertext[0..cyphertext.len() - 16], + res, + &cyphertext[cyphertext.len() - 16..], + ) + .is_err() + { + return Err(()); + } + Ok(()) + } + + /// Returns a `(is_stale, is_missing)` tuple describing the [`ChannelMonitor`] for `funding_outpoint`: + /// - `(true, false)` if the ChannelMonitor is stale, + /// - `(false, true)` if the ChannelMonitor is missing, + /// - `(false, false)` if the ChannelMonitor is healthy. + fn stale_or_missing_channel_monitor(&self, funding_outpoint: OutPoint, min_seen_secret: u64) -> (bool, bool) { + let monitor_state = self.monitors.read().unwrap(); + let monitor_holder = monitor_state.get(&funding_outpoint); + + // The monitor doesn't exist at all. + if monitor_holder.is_none() { + return (false, true); + } + let monitor = &monitor_holder.unwrap().monitor; + + // The peer storage has seen a newer state than our monitor, i.e. the monitor is stale. 
+ if monitor.get_min_seen_secret() > min_seen_secret { + return (true, false); + } + return (false, false); + } + + fn watch_dummy(&self, stub_channel_monitor: ChannelMonitor) { + if let Some(ref chain_source) = self.chain_source { + stub_channel_monitor.load_outputs_to_watch(chain_source, &self.logger); + } + + let mut monitors = self.monitors.write().unwrap(); + let entry = match monitors.entry(stub_channel_monitor.get_funding_txo().0) { + hash_map::Entry::Occupied(mut m) => { + log_error!(self.logger, "Failed to add new channel data: channel monitor for given outpoint is already present"); + // If this one isn't stale we need to update the monitor. + let holder = m.get_mut(); + if holder.monitor.get_min_seen_secret() + > stub_channel_monitor.get_min_seen_secret() + { + holder.monitor.merge_commitment_secret(stub_channel_monitor); + } + return; + }, + hash_map::Entry::Vacant(e) => e, + }; + self.pending_events.lock().unwrap().push(RecoveryEvent::RescanBlock { + rescan_from: stub_channel_monitor.current_best_block(), + }); + + let persist_res = self + .persister + .persist_new_channel(stub_channel_monitor.get_funding_txo().0, &stub_channel_monitor); + + match persist_res { + ChannelMonitorUpdateStatus::InProgress => { + log_info!( + self.logger, + "Persistence of new ChannelMonitor for channel {} in progress", + log_funding_info!(stub_channel_monitor) + ); + }, + ChannelMonitorUpdateStatus::Completed => { + log_info!( + self.logger, + "Persistence of new ChannelMonitor for channel {} completed", + log_funding_info!(stub_channel_monitor) + ); + }, + ChannelMonitorUpdateStatus::UnrecoverableError => { + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; + log_error!(self.logger, "{}", err_str); + panic!("{}", err_str); + }, + } + entry.insert(MonitorHolder { + monitor: stub_channel_monitor, + pending_monitor_updates: Mutex::new(Vec::new()), + }); + } + + fn process_chain_data( + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, + ) where + FN: Fn(&ChannelMonitor, &TransactionData) -> Vec, + { + process_chain_data_util( + &self.persister, + &self.chain_source, + &self.logger, + &self.monitors, + &self.highest_chain_height, + header, + best_height, + txdata, + process, + ); + } + + /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored. + /// + /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always + /// monitoring for on-chain state resolutions. + pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> { + self.monitors + .read() + .unwrap() + .iter() + .map(|(outpoint, monitor_holder)| { + let channel_id = monitor_holder.monitor.channel_id(); + (*outpoint, channel_id) + }) + .collect() + } + + /// Gets the [`LockedChannelMonitor`] for a given funding outpoint, returning an `Err` if no + /// such [`ChannelMonitor`] is currently being monitored for. + /// + /// Note that the result holds a mutex over our monitor set, and should not be held + /// indefinitely. 
+	pub fn get_monitor(
+		&self, funding_txo: OutPoint,
+	) -> Result<LockedChannelMonitor<'_, ChannelSigner>, ()> {
+		let lock = self.monitors.read().unwrap();
+		if lock.get(&funding_txo).is_some() {
+			Ok(LockedChannelMonitor { lock, funding_txo })
+		} else {
+			Err(())
+		}
+	}
+}
+
+struct DummySignerProvider<ChannelSigner: EcdsaChannelSigner> {
+	_marker: std::marker::PhantomData<ChannelSigner>,
+}
+
+struct DummyEntropySource;
+impl<ChannelSigner: EcdsaChannelSigner> SignerProvider for DummySignerProvider<ChannelSigner> {
+	type EcdsaSigner = ChannelSigner;
+
+	fn generate_channel_keys_id(
+		&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128,
+	) -> [u8; 32] {
+		unreachable!()
+	}
+
+	fn derive_channel_signer(
+		&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32],
+	) -> Self::EcdsaSigner {
+		unreachable!();
+	}
+
+	fn read_chan_signer(&self, _reader: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> {
+		unreachable!();
+	}
+
+	fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
+		unreachable!();
+	}
+
+	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
+		unreachable!();
+	}
+}
+
+impl EntropySource for DummyEntropySource {
+	fn get_secure_random_bytes(&self) -> [u8; 32] {
+		unreachable!();
+	}
+}
+
+impl<
+		ChannelSigner: EcdsaChannelSigner,
+		C: Deref,
+		SP: Deref,
+		L: Deref,
+		NS: Deref,
+		ES: Deref,
+		P: Deref,
+		T: Deref,
+		F: Deref,
+	> ChannelMessageHandler for FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F>
+where
+	SP::Target: SignerProvider<EcdsaSigner = ChannelSigner>,
+	NS::Target: NodeSigner,
+	L::Target: Logger,
+	ES::Target: EntropySource,
+	C::Target: chain::Filter,
+	P::Target: Persist<ChannelSigner>,
+	T::Target: BroadcasterInterface,
+	F::Target: FeeEstimator,
+{
+	fn handle_open_channel(&self, _their_node_id: PublicKey, _msg: &msgs::OpenChannel) {}
+	fn handle_accept_channel(&self, _their_node_id: PublicKey, _msg: &msgs::AcceptChannel) {}
+	fn handle_funding_created(&self, _their_node_id: PublicKey, _msg: &msgs::FundingCreated) {}
+	fn handle_funding_signed(&self, _their_node_id: PublicKey, _msg: &msgs::FundingSigned) {}
+	fn handle_channel_ready(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelReady) {}
+	fn handle_shutdown(&self, _their_node_id: PublicKey, _msg: &msgs::Shutdown) {}
+	fn handle_closing_signed(&self, _their_node_id: PublicKey, _msg: &msgs::ClosingSigned) {}
+	fn handle_update_add_htlc(&self, _their_node_id: PublicKey, _msg: &msgs::UpdateAddHTLC) {}
+	fn handle_update_fulfill_htlc(
+		&self, _their_node_id: PublicKey, _msg: &msgs::UpdateFulfillHTLC,
+	) {
+	}
+	fn handle_update_fail_htlc(&self, _their_node_id: PublicKey, _msg: &msgs::UpdateFailHTLC) {}
+	fn handle_update_fail_malformed_htlc(
+		&self, _their_node_id: PublicKey, _msg: &msgs::UpdateFailMalformedHTLC,
+	) {
+	}
+	fn handle_commitment_signed(&self, _their_node_id: PublicKey, _msg: &msgs::CommitmentSigned) {}
+	fn handle_revoke_and_ack(&self, _their_node_id: PublicKey, _msg: &msgs::RevokeAndACK) {}
+	fn handle_update_fee(&self, _their_node_id: PublicKey, _msg: &msgs::UpdateFee) {}
+	fn handle_announcement_signatures(
+		&self, _their_node_id: PublicKey, _msg: &msgs::AnnouncementSignatures,
+	) {
+	}
+	fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {}
+	fn handle_open_channel_v2(&self, _their_node_id: PublicKey, _msg: &msgs::OpenChannelV2) {}
+	fn handle_accept_channel_v2(&self, _their_node_id: PublicKey, _msg: &msgs::AcceptChannelV2) {}
+	fn handle_stfu(&self, _their_node_id: PublicKey, _msg: &msgs::Stfu) {}
+	#[cfg(splicing)]
+	fn handle_splice_init(&self, _their_node_id: PublicKey, _msg: &msgs::SpliceInit) {}
+	#[cfg(splicing)]
+	fn handle_splice_ack(&self, _their_node_id: PublicKey, _msg: &msgs::SpliceAck) {}
+	#[cfg(splicing)]
+	fn handle_splice_locked(&self, _their_node_id: PublicKey, _msg: &msgs::SpliceLocked) {}
+	fn handle_tx_add_input(&self, _their_node_id: PublicKey, _msg: &msgs::TxAddInput) {}
+	fn handle_tx_add_output(&self, _their_node_id: PublicKey, _msg: &msgs::TxAddOutput) {}
+	fn handle_tx_remove_input(&self, _their_node_id: PublicKey, _msg: &msgs::TxRemoveInput) {}
+	fn handle_tx_remove_output(&self, _their_node_id: PublicKey, _msg: &msgs::TxRemoveOutput) {}
+	fn handle_tx_complete(&self, _their_node_id: PublicKey, _msg: &msgs::TxComplete) {}
+	fn handle_tx_signatures(&self, _their_node_id: PublicKey, _msg: &msgs::TxSignatures) {}
+	fn handle_tx_init_rbf(&self, _their_node_id: PublicKey, _msg: &msgs::TxInitRbf) {}
+	fn handle_tx_ack_rbf(&self, _their_node_id: PublicKey, _msg: &msgs::TxAckRbf) {}
+	fn handle_tx_abort(&self, _their_node_id: PublicKey, _msg: &msgs::TxAbort) {}
+	fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: &msgs::PeerStorageMessage) {}
+
+	fn handle_your_peer_storage(
+		&self, counterparty_node_id: PublicKey, msg: &msgs::YourPeerStorageMessage,
+	) {
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+		if msg.data.len() < 16 {
+			log_debug!(
+				logger,
+				"Invalid YourPeerStorage received from {}",
+				log_pubkey!(counterparty_node_id)
+			);
+			return;
+		}
+
+		let mut res = vec![0; msg.data.len() - 16];
+		{
+			match self.decrypt_our_peer_storage(&mut res, msg.data.as_slice()) {
+				Ok(()) => {
+					// Decryption successful, the plaintext is now stored in `res`.
+					log_debug!(
+						logger,
+						"Received valid peer storage from peer {}",
+						log_pubkey!(counterparty_node_id)
+					);
+				},
+				Err(_) => {
+					log_debug!(
+						logger,
+						"Invalid YourPeerStorage received from {}",
+						log_pubkey!(counterparty_node_id)
+					);
+					return;
+				},
+			}
+		}
+
+		let our_peer_storage =
+			<OurPeerStorage as Readable>::read(&mut ::bitcoin::io::Cursor::new(res)).unwrap();
+
+		for ((_, _), _) in our_peer_storage.get_cid_and_min_seen_secret().unwrap() {
+			let chan_reader = &mut ::bitcoin::io::Cursor::new(our_peer_storage.get_ser_channels());
+			let num_chan: u64 = Readable::read(chan_reader)
+				.unwrap_or_else(|op| panic!("Failed to read num_chan: {:?}", op));
+			for _ in 0..num_chan {
+				let len: u64 = Readable::read(chan_reader)
+					.unwrap_or_else(|op| panic!("Failed to read len: {:?}", op));
+				let mut chan_bytes: Vec<u8> = Vec::with_capacity(len as usize);
+				for _ in 0..len {
+					chan_bytes.push(Readable::read(chan_reader)
+						.unwrap_or_else(|op| panic!("Failed to read chan_bytes: {:?}", op)));
+				}
+				let mut chan_reader = ::bitcoin::io::Cursor::new(chan_bytes);
+
+				match get_stub_channel_info_from_ser_channel(&mut chan_reader) {
+					Ok(ps_channel) => {
+						let (stale, missing) = self.stale_or_missing_channel_monitor(
+							ps_channel.funding_outpoint,
+							ps_channel.min_seen_secret,
+						);
+						if stale || missing {
+							let keys = self.signer_provider.derive_channel_signer(
+								ps_channel.channel_value_satoshi,
+								ps_channel.channel_keys_id,
+							);
+
+							let (_, monitor) = read_util::<_, ChannelSigner, DummySignerProvider<ChannelSigner>, DummyEntropySource>(
+								&mut chan_reader,
+								ReadUtilOpt::IsStub { keys, secp_ctx: self.secp_ctx.clone() },
+							).unwrap();
+							let cid = monitor.channel_id();
+							let channel_partner_node_id = monitor.get_counterparty_node_id().unwrap();
+							self.watch_dummy(monitor);
+							log_debug!(
+								logger,
+								"Generating BogusChannelReestablish to force close the channel."
+							);
+
+							let per_peer_state = self.per_peer_state.read().unwrap();
+							if let Some(peer_state_mutex) = per_peer_state.get(&channel_partner_node_id) {
+								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+								let peer_state = &mut *peer_state_lock;
+								let pending_msg_events = &mut peer_state.pending_msg_events;
+								pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+									node_id: channel_partner_node_id,
+									msg: msgs::ChannelReestablish {
+										channel_id: cid,
+										next_local_commitment_number: 0,
+										next_remote_commitment_number: 0,
+										your_last_per_commitment_secret: [1u8; 32],
+										my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33])
+											.unwrap(),
+										next_funding_txid: None,
+									},
+								})
+							}
+						}
+					},
+					Err(_) => {
+						panic!("Could not get peer storage");
+					},
+				}
+			}
+		}
+	}
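The loop above walks a simple framing: a `u64` channel count, then, per channel, a `u64` byte length followed by that many bytes. A self-contained sketch of reading that framing, assuming LDK's big-endian encoding for primitive integers (the helper name is hypothetical):

```rust
use std::io::{Cursor, Read};

// Reads a u64 count, then `count` length-prefixed byte blobs, mirroring the
// peer-storage channel framing. Integers are assumed big-endian, as in LDK's
// `Readable` implementation for primitive ints.
fn read_framed_channels(data: &[u8]) -> std::io::Result<Vec<Vec<u8>>> {
    let mut reader = Cursor::new(data);
    let mut buf8 = [0u8; 8];
    reader.read_exact(&mut buf8)?;
    let num_chan = u64::from_be_bytes(buf8);

    let mut channels = Vec::new();
    for _ in 0..num_chan {
        reader.read_exact(&mut buf8)?;
        let len = u64::from_be_bytes(buf8) as usize;
        let mut chan_bytes = vec![0u8; len];
        reader.read_exact(&mut chan_bytes)?;
        channels.push(chan_bytes);
    }
    Ok(channels)
}
```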
+
+	fn peer_disconnected(&self, _their_node_id: PublicKey) {}
+
+	fn peer_connected(
+		&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, _inbound: bool,
+	) -> Result<(), ()> {
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		{
+			let mut peer_state_lock = self.per_peer_state.write().unwrap();
+			match peer_state_lock.entry(counterparty_node_id.clone()) {
+				hash_map::Entry::Vacant(e) => {
+					e.insert(Mutex::new(PeerState::new(&init_msg.features)));
+				},
+				hash_map::Entry::Occupied(e) => {
+					let mut peer_state = e.get().lock().unwrap();
+
+					debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+					peer_state.is_connected = true;
+				},
+			}
+		}
+
+		log_debug!(logger, "Connected to node {}", log_pubkey!(counterparty_node_id));
+		Ok(())
+	}
+
+	fn handle_channel_reestablish(
+		&self, their_node_id: PublicKey, msg: &msgs::ChannelReestablish,
+	) {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mutex) = per_peer_state.get(&their_node_id) {
+			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &mut *peer_state_lock;
+			let pending_msg_events = &mut peer_state.pending_msg_events;
+			for monitor_state in self.monitors.read().unwrap().values() {
+				if monitor_state.monitor.channel_id() == msg.channel_id
+					&& monitor_state.monitor.get_latest_update_id() == STUB_CHANNEL_UPDATE_IDENTIFIER
+				{
+					pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
+						node_id: their_node_id,
+						msg: msgs::ChannelReestablish {
+							channel_id: msg.channel_id,
+							next_local_commitment_number: 0,
+							next_remote_commitment_number: 0,
+							your_last_per_commitment_secret: [1u8; 32],
+							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33])
+								.unwrap(),
+							next_funding_txid: None,
+						},
+					})
+				}
+			}
+		}
+	}
+	fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {}
+	fn provided_node_features(&self) -> NodeFeatures {
+		provided_node_features(&self.default_configuration)
+	}
+	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
+		provided_init_features(&self.default_configuration)
+	}
+	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
+		Some(vec![self.chain_hash])
+	}
+
+	fn message_received(&self) {}
+}
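Both `handle_your_peer_storage` and `handle_channel_reestablish` follow the same locking discipline: take the outer `RwLock` on the peer map for reading, then the per-peer `Mutex` to queue an outbound message. A reduced sketch of that pattern with placeholder types (the names here are illustrative, not LDK types):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Placeholder per-peer state; in the patch this holds `pending_msg_events`.
struct PeerState {
    pending_msgs: Vec<String>,
}

// Queues a message for one peer while holding the outer map lock only as a
// reader, so unrelated peers can be serviced concurrently.
fn queue_msg(per_peer: &RwLock<HashMap<u64, Mutex<PeerState>>>, peer_id: u64, msg: String) {
    let map = per_peer.read().unwrap();
    if let Some(state) = map.get(&peer_id) {
        state.lock().unwrap().pending_msgs.push(msg);
    }
}
```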
+
+impl<
+		ChannelSigner: EcdsaChannelSigner,
+		C: Deref,
+		SP: Deref,
+		L: Deref,
+		NS: Deref,
+		ES: Deref,
+		P: Deref,
+		T: Deref,
+		F: Deref,
+	> chain::Confirm for FundRecoverer<ChannelSigner, C, SP, L, NS, ES, P, T, F>
+where
+	SP::Target: SignerProvider<EcdsaSigner = ChannelSigner>,
+	NS::Target: NodeSigner,
+	L::Target: Logger,
+	ES::Target: EntropySource,
+	C::Target: chain::Filter,
+	P::Target: Persist<ChannelSigner>,
+	T::Target: BroadcasterInterface,
+	F::Target: FeeEstimator,
+{
+	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
+		log_debug!(
+			self.logger,
+			"{} provided transactions confirmed at height {} in block {}",
+			txdata.len(),
+			height,
+			header.block_hash()
+		);
+
+		self.process_chain_data(header, None, txdata, |monitor, txdata| {
+			monitor.transactions_confirmed(
+				header,
+				txdata,
+				height,
+				&*self.broadcaster,
+				&*self.fee_estimator,
+				&self.logger,
+			)
+		});
+		// Assume we may have some new events and wake the event processor.
+		self.event_notifier.notify();
+	}
+
+	fn transaction_unconfirmed(&self, txid: &Txid) {
+		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
+		let monitor_states = self.monitors.read().unwrap();
+		for monitor_state in monitor_states.values() {
+			monitor_state.monitor.transaction_unconfirmed(
+				txid,
+				&*self.broadcaster,
+				&*self.fee_estimator,
+				&self.logger,
+			);
+		}
+	}
+
+	fn best_block_updated(&self, header: &Header, height: u32) {
+		log_debug!(
+			self.logger,
+			"New best block {} at height {} provided via best_block_updated",
+			header.block_hash(),
+			height
+		);
+		self.process_chain_data(header, Some(height), &[], |monitor, txdata| {
+			// While in practice there shouldn't be any recursive calls when given empty txdata,
+			// it's still possible if a chain::Filter implementation returns a transaction.
+			debug_assert!(txdata.is_empty());
+			monitor.best_block_updated(
+				header,
+				height,
+				&*self.broadcaster,
+				&*self.fee_estimator,
+				&self.logger,
+			)
+		});
+		// Assume we may have some new events and wake the event processor.
+		self.event_notifier.notify();
+	}
+
+	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
+		let mut txids = Vec::new();
+		let monitor_states = self.monitors.read().unwrap();
+		for monitor_state in monitor_states.values() {
+			txids.append(&mut monitor_state.monitor.get_relevant_txids());
+		}
+
+		txids.sort_unstable_by(|a, b| a.0.cmp(&b.0).then(b.1.cmp(&a.1)));
+		txids.dedup_by_key(|(txid, _, _)| *txid);
+		txids
+	}
+}
diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs
index dceb52ab4ae..337d9138e94 100644
--- a/lightning/src/ln/mod.rs
+++ b/lightning/src/ln/mod.rs
@@ -30,6 +30,8 @@ pub mod chan_utils;
 pub mod features;
 pub mod script;
 pub mod types;
+pub mod our_peer_storage;
+pub mod fundrecoverer;
 
 // TODO: These modules were moved from lightning-invoice and need to be better integrated into this
 // crate now:
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index be5ecb27ae0..14e8f251c10 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -723,6 +723,24 @@ pub struct UpdateFulfillHTLC {
 	pub payment_preimage: PaymentPreimage,
 }
 
+/// A [`PeerStorage`] message to be sent to or received from a peer.
+///
+/// [`PeerStorage`]: https://github.com/lightning/bolts/pull/1110
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct PeerStorageMessage {
+	/// The data contained in the message.
+	pub data: Vec<u8>,
+}
+
+/// A [`YourPeerStorage`] message to be sent to or received from a peer.
+///
+/// [`YourPeerStorage`]: https://github.com/lightning/bolts/pull/1110
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct YourPeerStorageMessage {
+	/// The data contained in the message.
+	pub data: Vec<u8>,
+}
+
 /// An [`update_fail_htlc`] message to be sent to or received from a peer.
/// /// [`update_fail_htlc`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#removing-an-htlc-update_fulfill_htlc-update_fail_htlc-and-update_fail_malformed_htlc @@ -1484,6 +1502,19 @@ pub struct CommitmentUpdate { pub commitment_signed: CommitmentSigned, } +/// A trait for sending messages to peers without handling any incoming messages. +/// +/// This trait is designed to handle outbound-only communication, allowing implementations +/// to send specific types of messages to connected peers identified by their public keys. +/// +/// This trait extends [`MessageSendEventsProvider`], meaning it is capable of generating +/// message send events, which can be processed using +/// [`MessageSendEventsProvider::get_and_clear_pending_msg_events`]. +pub trait SendingOnlyMessageHandler: MessageSendEventsProvider { + /// Send `peer_storage` message to the given peer. + fn send_peer_storage(&self, their_node_id: PublicKey); +} + /// A trait to describe an object which can receive channel messages. /// /// Messages MAY be called in parallel when they originate from different `their_node_ids`, however @@ -1505,6 +1536,12 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// Handle an incoming `channel_ready` message from the given peer. fn handle_channel_ready(&self, their_node_id: PublicKey, msg: &ChannelReady); + // Peer Storage + /// Handle an incoming `peer_storage` message from the given peer. + fn handle_peer_storage(&self, their_node_id: PublicKey, msg: &PeerStorageMessage); + /// Handle an incoming `your_peer_storage` message from the given peer. + fn handle_your_peer_storage(&self, their_node_id: PublicKey, msg: &YourPeerStorageMessage); + // Channel close: /// Handle an incoming `shutdown` message from the given peer. fn handle_shutdown(&self, their_node_id: PublicKey, msg: &Shutdown); @@ -2597,6 +2634,14 @@ impl_writeable_msg!(UpdateFulfillHTLC, { payment_preimage }, {}); +impl_writeable_msg!(PeerStorageMessage, { + data +}, {}); + +impl_writeable_msg!(YourPeerStorageMessage, { + data +}, {}); + // Note that this is written as a part of ChannelManager objects, and thus cannot change its // serialization format in a way which assumes we know the total serialized length/message end // position. 
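`impl_writeable_msg!` serializes the single `data` field as a length-prefixed byte vector. A quick sketch of the resulting body bytes, assuming LDK's big-endian `u16` length prefix for `Vec<u8>` (the message type number itself is written separately by the wire layer); this reproduces the `000a...` vectors in the encoding tests that follow:

```rust
// Encodes a PeerStorageMessage/YourPeerStorageMessage body: a big-endian u16
// length prefix followed by the raw data bytes.
fn encode_data_field(data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(2 + data.len());
    out.extend_from_slice(&(data.len() as u16).to_be_bytes());
    out.extend_from_slice(data);
    out
}

fn main() {
    let data = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];
    // 10 bytes of data -> 0x000a length prefix, as in the tests below.
    assert_eq!(encode_data_field(&data)[..2], [0x00, 0x0a]);
}
```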
@@ -4447,6 +4492,26 @@ mod tests {
 		assert_eq!(encoded_value, target_value);
 	}
 
+	#[test]
+	fn encoding_peer_storage() {
+		let peerstorage = msgs::PeerStorageMessage {
+			data: <Vec<u8>>::from_hex("01020304050607080910").unwrap()
+		};
+		let encoded_value = peerstorage.encode();
+		let target_value = <Vec<u8>>::from_hex("000a01020304050607080910").unwrap();
+		assert_eq!(encoded_value, target_value);
+	}
+
+	#[test]
+	fn encoding_your_peer_storage() {
+		let yourpeerstorage = msgs::YourPeerStorageMessage {
+			data: <Vec<u8>>::from_hex("01020304050607080910").unwrap()
+		};
+		let encoded_value = yourpeerstorage.encode();
+		let target_value = <Vec<u8>>::from_hex("000a01020304050607080910").unwrap();
+		assert_eq!(encoded_value, target_value);
+	}
+
 	#[test]
 	fn encoding_pong() {
 		let pong = msgs::Pong {
diff --git a/lightning/src/ln/our_peer_storage.rs b/lightning/src/ln/our_peer_storage.rs
new file mode 100644
index 00000000000..792c14a40da
--- /dev/null
+++ b/lightning/src/ln/our_peer_storage.rs
@@ -0,0 +1,168 @@
+use crate::ln::types::ChannelId;
+use bitcoin::secp256k1::PublicKey;
+use std::collections::HashMap;
+
+use crate::chain::channelmonitor::get_stub_channel_info_from_ser_channel;
+use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC;
+
+use crate::util::ser::{Writeable, VecWriter, Writer, Readable};
+
+use crate::prelude::*;
+use crate::io::{self, Error};
+
+use crate::ln::msgs::DecodeError;
+
+/// [`OurPeerStorage`] is used to store channel information that allows for the creation of a
+/// PeerStorage backup. It includes versioning and timestamping for comparison between
+/// instances of [`OurPeerStorage`].
+///
+/// This structure is designed to serialize channel data for backup and supports encryption
+/// and decryption to ensure data integrity and security during exchange or storage.
+///
+/// # Fields
+/// - `version`: Defines the structure's version for backward compatibility.
+/// - `timestamp`: UNIX timestamp indicating the creation or modification time of the instance.
+/// - `ser_channels`: Serialized channel data.
+///
+/// # Key Methods
+/// - `new`: Creates a new [`OurPeerStorage`] instance with the current timestamp.
+/// - `stub_channels`: Updates the serialized channel data.
+/// - `get_ser_channels`: Retrieves the serialized channel data.
+/// - `encrypt_our_peer_storage`: Encrypts the storage using a given key and returns the ciphertext.
+/// - `decrypt_our_peer_storage`: Decrypts the ciphertext using the key and updates the result buffer.
+/// - `get_cid_and_min_seen_secret`: Extracts channel IDs and their corresponding minimum seen
+///   secrets from the serialized data.
+///
+/// # Usage
+/// This structure can be used for securely managing and exchanging peer storage backups. It
+/// includes methods for encryption and decryption using `ChaCha20Poly1305RFC`, making it
+/// suitable for on-the-wire transmission.
+///
+/// ## Example
+/// ```
+/// let mut our_peer_storage = OurPeerStorage::new();
+/// our_peer_storage.stub_channels(vec![1, 2, 3]);
+/// let key = [0u8; 32];
+/// let mut encrypted = our_peer_storage.encrypt_our_peer_storage(key);
+/// encrypted.extend_from_slice(&key); // decrypt expects the key appended
+/// let mut decrypted = vec![0u8; encrypted.len() - 32 - 16];
+/// OurPeerStorage::decrypt_our_peer_storage(&mut decrypted, &encrypted).unwrap();
+/// ```
+#[derive(PartialEq)]
+pub struct OurPeerStorage {
+	version: u8,
+	timestamp: u32,
+	ser_channels: Vec<u8>,
+}
+
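Per the `Writeable` implementation later in this file, an `OurPeerStorage` serializes as a version prefix, the timestamp, and the channel bytes. A layout sketch under two assumptions (both labeled, since neither is shown in this excerpt): `write_ver_prefix!` emits the struct version followed by a minimum-compatible-version byte, and `Vec<u8>` gets LDK's big-endian `u16` length prefix:

```rust
// Byte layout sketch of a serialized OurPeerStorage (assumptions noted above):
// [version: u8][min_compat_version: u8][timestamp: u32 BE][len: u16 BE][ser_channels...]
fn serialize_storage_sketch(version: u8, timestamp: u32, ser_channels: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    out.push(version);
    out.push(1); // assumed minimum version that can read this structure
    out.extend_from_slice(&timestamp.to_be_bytes());
    out.extend_from_slice(&(ser_channels.len() as u16).to_be_bytes());
    out.extend_from_slice(ser_channels);
    out
}
```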
+impl OurPeerStorage {
+	/// Returns an [`OurPeerStorage`] with version 1 and the current timestamp.
+	pub fn new() -> Self {
+		let duration_since_epoch = std::time::SystemTime::now()
+			.duration_since(std::time::SystemTime::UNIX_EPOCH)
+			.expect("Time must be > 1970");
+
+		Self {
+			version: 1,
+			timestamp: duration_since_epoch.as_secs() as u32,
+			ser_channels: Vec::new(),
+		}
+	}
+
+	/// Sets the serialized channel data inside [`OurPeerStorage`].
+	pub fn stub_channels(&mut self, ser_chan: Vec<u8>) {
+		self.ser_channels = ser_chan;
+	}
+
+	/// Gets the `ser_channels` field from [`OurPeerStorage`].
+	pub fn get_ser_channels(&self) -> Vec<u8> {
+		self.ser_channels.clone()
+	}
+
+	/// Encrypts [`OurPeerStorage`] using the `key` and returns a `Vec<u8>` containing the result.
+	pub fn encrypt_our_peer_storage(&self, key: [u8; 32]) -> Vec<u8> {
+		let n = 0u64;
+		let mut peer_storage = VecWriter(Vec::new());
+		self.write(&mut peer_storage).unwrap();
+		let mut res = vec![0; peer_storage.0.len() + 16];
+
+		let plaintext = &peer_storage.0[..];
+		let mut nonce = [0; 12];
+		nonce[4..].copy_from_slice(&n.to_le_bytes()[..]);
+
+		let mut chacha = ChaCha20Poly1305RFC::new(&key, &nonce, b"");
+		let mut tag = [0; 16];
+		chacha.encrypt(plaintext, &mut res[0..plaintext.len()], &mut tag);
+		res[plaintext.len()..].copy_from_slice(&tag);
+		res
+	}
+
+	/// Decrypts `OurPeerStorage` using the `key` found at the end of `cyphertext_with_key`; the
+	/// plaintext is written into `res`. Returns an error if the ciphertext fails authentication.
+	pub fn decrypt_our_peer_storage(res: &mut [u8], cyphertext_with_key: &[u8]) -> Result<(), ()> {
+		const KEY_SIZE: usize = 32;
+
+		// The combined data must be longer than the appended key.
+		if cyphertext_with_key.len() <= KEY_SIZE {
+			return Err(());
+		}
+
+		let (cyphertext, key) = cyphertext_with_key.split_at(cyphertext_with_key.len() - KEY_SIZE);
+		let n = 0u64;
+		let mut nonce = [0; 12];
+		nonce[4..].copy_from_slice(&n.to_le_bytes()[..]);
+
+		let mut chacha = ChaCha20Poly1305RFC::new(&key, &nonce, b"");
+		if chacha
+			.variable_time_decrypt(
+				&cyphertext[0..cyphertext.len() - 16],
+				res,
+				&cyphertext[cyphertext.len() - 16..],
+			)
+			.is_err()
+		{
+			return Err(());
+		}
+		Ok(())
+	}
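Note the asymmetry between the two decrypt helpers in this patch: the recoverer's `decrypt_our_peer_storage` reads the key from the node signer, while this associated function expects the 32-byte key appended after the ciphertext and tag. A small sketch of that split (the strict length check here is an illustrative tightening of the `<= KEY_SIZE` guard above):

```rust
// Splits a (ciphertext || tag || key) blob into its three parts: the final 32
// bytes are the key and the 16 bytes before them are the Poly1305 tag.
fn split_ciphertext_with_key(blob: &[u8]) -> Result<(&[u8], &[u8], &[u8]), ()> {
    const KEY_SIZE: usize = 32;
    const TAG_SIZE: usize = 16;
    if blob.len() <= KEY_SIZE + TAG_SIZE {
        return Err(());
    }
    let (ct_and_tag, key) = blob.split_at(blob.len() - KEY_SIZE);
    let (ct, tag) = ct_and_tag.split_at(ct_and_tag.len() - TAG_SIZE);
    Ok((ct, tag, key))
}
```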
+
+	/// We store some channel information ahead of each serialized channel so that we can identify stale or missing `ChannelMonitor`s.
+	pub fn get_cid_and_min_seen_secret(&self) -> Result<HashMap<(PublicKey, ChannelId), u64>, DecodeError> {
+		let mut cid_min_secret_map = HashMap::new();
+		let chan_reader = &mut ::bitcoin::io::Cursor::new(self.ser_channels.clone());
+		let num_chan: u64 = Readable::read(chan_reader)?;
+		for _ in 0..num_chan {
+			let len: u64 = Readable::read(chan_reader)?;
+			let mut chan_bytes: Vec<u8> = Vec::with_capacity(len as usize);
+			for _ in 0..len {
+				chan_bytes.push(Readable::read(chan_reader)?);
+			}
+			let mut chan_reader = ::bitcoin::io::Cursor::new(chan_bytes);
+			match get_stub_channel_info_from_ser_channel(&mut chan_reader) {
+				Ok(p) => {
+					cid_min_secret_map.insert((p.counterparty_node_id, p.cid), p.min_seen_secret);
+				}
+				Err(_) => {
+					panic!("Could not get Peer Storage");
+				}
+			}
+		}
+		Ok(cid_min_secret_map)
+	}
+}
+
+impl Writeable for OurPeerStorage {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+		write_ver_prefix!(writer, self.version, 1);
+		self.timestamp.write(writer)?;
+		self.ser_channels.write(writer)?;
+		Ok(())
+	}
+}
+
+impl Readable for OurPeerStorage {
+	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+		let ver = read_ver_prefix!(reader, 1u8);
+		let timestamp: u32 = Readable::read(reader)?;
+		let ser_channels = <Vec<u8> as Readable>::read(reader)?;
+
+		let ps = OurPeerStorage {
+			version: ver,
+			timestamp,
+			ser_channels,
+		};
+		Ok(ps)
+	}
+}
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 64c35835bda..e9fedc36077 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -24,10 +24,11 @@ use crate::events::{MessageSendEvent, MessageSendEventsProvider};
 use crate::ln::types::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, Init, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::msgs::{ChannelMessageHandler, Init, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler, SendingOnlyMessageHandler};
 use crate::util::ser::{VecWriter, Writeable, Writer};
 use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
 use crate::ln::wire;
+use crate::chain::chainmonitor::ChainMonitor;
 use crate::ln::wire::{Encode, Type};
 use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc};
 use crate::onion_message::dns_resolution::{DNSResolverMessageHandler, DNSResolverMessage, DNSSECProof, DNSSECQuery};
@@ -142,6 +143,10 @@ impl OnionMessageHandler for IgnoringMessageHandler {
 	}
 }
 
+impl SendingOnlyMessageHandler for IgnoringMessageHandler {
+	fn send_peer_storage(&self, _their_node_id: PublicKey) {}
+}
+
 impl OffersMessageHandler for IgnoringMessageHandler {
 	fn handle_message(&self, _message: OffersMessage, _context: Option<OffersContext>, _responder: Option<Responder>) -> Option<(OffersMessage, ResponseInstruction)> {
 		None
@@ -323,6 +328,8 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	}
 	// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {} + fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: &msgs::PeerStorageMessage) {} + fn handle_your_peer_storage(&self, _their_node_id: PublicKey, _msg: &msgs::YourPeerStorageMessage) {} fn peer_disconnected(&self, _their_node_id: PublicKey) {} fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {} @@ -407,11 +414,12 @@ impl Deref for ErroringMessageHandler { } /// Provides references to trait impls which handle different types of messages. -pub struct MessageHandler where +pub struct MessageHandler where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, CustomM::Target: CustomMessageHandler, + SM::Target: SendingOnlyMessageHandler, { /// A message handler which handles messages specific to channels. Usually this is just a /// [`ChannelManager`] object or an [`ErroringMessageHandler`]. @@ -433,6 +441,9 @@ pub struct MessageHandler where /// A message handler which handles custom messages. The only LDK-provided implementation is /// [`IgnoringMessageHandler`]. pub custom_message_handler: CustomM, + + /// A message handler which only allows sending messages. + pub send_only_message_handler: SM, } /// Provides an object which can be used to send data to and which uniquely identifies a connection @@ -699,7 +710,8 @@ pub type SimpleArcPeerManager = PeerManager< Arc>, Arc, IgnoringMessageHandler, - Arc + Arc, + Arc, Arc, Arc, Arc, Arc, Arc>>, >; /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference @@ -720,7 +732,8 @@ pub type SimpleRefPeerManager< &'h SimpleRefOnionMessenger<'a, 'b, 'c, 'd, 'e, 'graph, 'logger, 'i, 'j, 'k, M, T, F, L>, &'logger L, IgnoringMessageHandler, - &'c KeysManager + &'c KeysManager, + &'j ChainMonitor<&'a M, C, &'b T, &'c F, &'logger L, &'c KeysManager>, >; @@ -745,18 +758,21 @@ pub trait APeerManager { type CMH: Deref; type NST: NodeSigner + ?Sized; type NS: Deref; + type SMT: SendingOnlyMessageHandler + ?Sized; + type SM: Deref; /// Gets a reference to the underlying [`PeerManager`]. - fn as_ref(&self) -> &PeerManager; + fn as_ref(&self) -> &PeerManager; } -impl -APeerManager for PeerManager where +impl +APeerManager for PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, NS::Target: NodeSigner, + SM::Target: SendingOnlyMessageHandler, { type Descriptor = Descriptor; type CMT = ::Target; @@ -771,7 +787,9 @@ APeerManager for PeerManager where type CMH = CMH; type NST = ::Target; type NS = NS; - fn as_ref(&self) -> &PeerManager { self } + type SMT = ::Target; + type SM = SM; + fn as_ref(&self) -> &PeerManager { self } } /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls @@ -793,14 +811,15 @@ APeerManager for PeerManager where /// you're using lightning-net-tokio. 
/// /// [`read_event`]: PeerManager::read_event -pub struct PeerManager where +pub struct PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner { - message_handler: MessageHandler, + NS::Target: NodeSigner, + SM::Target: SendingOnlyMessageHandler { + message_handler: MessageHandler, /// Connection state for each connected peer - we have an outer read-write lock which is taken /// as read while we're doing processing for a peer and taken write when a peer is being added /// or removed. @@ -870,11 +889,12 @@ macro_rules! encode_msg { }} } -impl PeerManager where +impl PeerManager where CM::Target: ChannelMessageHandler, OM::Target: OnionMessageHandler, L::Target: Logger, - NS::Target: NodeSigner { + NS::Target: NodeSigner, + SM::Target: SendingOnlyMessageHandler { /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and /// `OnionMessageHandler`. No routing message handler is used and network graph messages are /// ignored. @@ -888,17 +908,18 @@ impl Pe /// minute should suffice. /// /// This is not exported to bindings users as we can't export a PeerManager with a dummy route handler - pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self { + pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS, send_only_message_handler: SM) -> Self { Self::new(MessageHandler { chan_handler: channel_message_handler, route_handler: IgnoringMessageHandler{}, onion_message_handler, custom_message_handler: IgnoringMessageHandler{}, + send_only_message_handler, }, current_time, ephemeral_random_data, logger, node_signer) } } -impl PeerManager where +impl PeerManager where RM::Target: RoutingMessageHandler, L::Target: Logger, NS::Target: NodeSigner { @@ -922,6 +943,7 @@ impl PeerManager) -> Option } } -impl PeerManager where +impl PeerManager where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, L::Target: Logger, CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner + NS::Target: NodeSigner, + SM::Target: SendingOnlyMessageHandler, { /// Constructs a new `PeerManager` with the given message handlers. 
/// @@ -984,7 +1007,7 @@ impl, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self { + pub fn new(message_handler: MessageHandler, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self { let mut ephemeral_key_midstate = Sha256::engine(); ephemeral_key_midstate.input(ephemeral_random_data); @@ -1813,6 +1836,12 @@ impl { self.message_handler.chan_handler.handle_channel_ready(their_node_id, &msg); }, + wire::Message::PeerStorageMessage(msg) => { + self.message_handler.chan_handler.handle_peer_storage(their_node_id, &msg); + }, + wire::Message::YourPeerStorageMessage(msg) => { + self.message_handler.chan_handler.handle_your_peer_storage(their_node_id, &msg); + }, // Quiescence messages: wire::Message::Stfu(msg) => { @@ -2075,6 +2104,7 @@ impl { + log_debug!(self.logger, "Handling SendPeerStorageMessage event in peer_handler for {}", log_pubkey!(node_id)); + self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); + }, + MessageSendEvent::SendYourPeerStorageMessage { ref node_id, ref msg } => { + log_debug!(self.logger, "Handling SendYourPeerStorageMessage event in peer_handler for {}", log_pubkey!(node_id)); + self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); + }, MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), @@ -2761,6 +2799,7 @@ mod tests { chan_handler: test_utils::TestChannelMessageHandler, routing_handler: test_utils::TestRoutingMessageHandler, custom_handler: TestCustomMessageHandler, + sending_handler: test_utils::TestSendingOnlyMessageHandler, logger: test_utils::TestLogger, node_signer: test_utils::TestNodeSigner, } @@ -2809,6 +2848,7 @@ mod tests { chan_handler: test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)), logger: test_utils::TestLogger::new(), routing_handler: test_utils::TestRoutingMessageHandler::new(), + sending_handler: test_utils::TestSendingOnlyMessageHandler::new(), custom_handler: TestCustomMessageHandler { features }, node_signer: test_utils::TestNodeSigner::new(node_secret), } @@ -2832,6 +2872,7 @@ mod tests { chan_handler: test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)), logger: test_utils::TestLogger::new(), routing_handler: test_utils::TestRoutingMessageHandler::new(), + sending_handler: test_utils::TestSendingOnlyMessageHandler::new(), custom_handler: TestCustomMessageHandler { features }, node_signer: test_utils::TestNodeSigner::new(node_secret), } @@ -2852,6 +2893,7 @@ mod tests { chan_handler: test_utils::TestChannelMessageHandler::new(network), logger: test_utils::TestLogger::new(), routing_handler: test_utils::TestRoutingMessageHandler::new(), + sending_handler: test_utils::TestSendingOnlyMessageHandler::new(), custom_handler: TestCustomMessageHandler { features }, node_signer: test_utils::TestNodeSigner::new(node_secret), } @@ -2861,13 +2903,13 @@ mod tests { cfgs } - fn create_network<'a>(peer_count: usize, cfgs: &'a Vec) -> Vec> { + fn create_network<'a>(peer_count: usize, cfgs: &'a Vec) -> Vec> { let mut peers = Vec::new(); for i in 0..peer_count { let ephemeral_bytes = [i as u8; 32]; let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, - onion_message_handler: 
IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler + onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler, send_only_message_handler: &cfgs[i].sending_handler, }; let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer); peers.push(peer); @@ -2876,7 +2918,7 @@ mod tests { peers } - fn establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor) { + fn establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor) { let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap(); let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())), @@ -3275,12 +3317,14 @@ mod tests { route_handler: IgnoringMessageHandler {}, onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: IgnoringMessageHandler {}, + send_only_message_handler: IgnoringMessageHandler {}, }, 0, &[0; 32], &logger, &node_signer_a); let peer_b = PeerManager::new(MessageHandler { chan_handler: ErroringMessageHandler::new(), route_handler: IgnoringMessageHandler {}, onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: IgnoringMessageHandler {}, + send_only_message_handler: IgnoringMessageHandler {}, }, 0, &[1; 32], &logger, &node_signer_b); let a_id = node_signer_a.get_node_id(Recipient::Node).unwrap(); diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index 4cf5e21c173..598b9253876 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -53,6 +53,8 @@ pub(crate) enum Message where T: core::fmt::Debug + Type + TestEq { Warning(msgs::WarningMessage), Ping(msgs::Ping), Pong(msgs::Pong), + PeerStorageMessage(msgs::PeerStorageMessage), + YourPeerStorageMessage(msgs::YourPeerStorageMessage), OpenChannel(msgs::OpenChannel), OpenChannelV2(msgs::OpenChannelV2), AcceptChannel(msgs::AcceptChannel), @@ -111,6 +113,8 @@ impl Writeable for Message where T: core::fmt::Debug + Type + TestEq { &Message::Warning(ref msg) => msg.write(writer), &Message::Ping(ref msg) => msg.write(writer), &Message::Pong(ref msg) => msg.write(writer), + &Message::PeerStorageMessage(ref msg) => msg.write(writer), + &Message::YourPeerStorageMessage(ref msg) => msg.write(writer), &Message::OpenChannel(ref msg) => msg.write(writer), &Message::OpenChannelV2(ref msg) => msg.write(writer), &Message::AcceptChannel(ref msg) => msg.write(writer), @@ -169,6 +173,8 @@ impl Type for Message where T: core::fmt::Debug + Type + TestEq { &Message::Warning(ref msg) => msg.type_id(), &Message::Ping(ref msg) => msg.type_id(), &Message::Pong(ref msg) => msg.type_id(), + &Message::PeerStorageMessage(ref msg) => msg.type_id(), + &Message::YourPeerStorageMessage(ref msg) => msg.type_id(), &Message::OpenChannel(ref msg) => msg.type_id(), &Message::OpenChannelV2(ref msg) => msg.type_id(), &Message::AcceptChannel(ref msg) => msg.type_id(), @@ -261,6 +267,12 @@ fn do_read(buffer: &mut R, message_type: u1 msgs::Pong::TYPE => { Ok(Message::Pong(Readable::read(buffer)?)) }, + msgs::PeerStorageMessage::TYPE => { + Ok(Message::PeerStorageMessage(Readable::read(buffer)?)) + }, + msgs::YourPeerStorageMessage::TYPE => { + Ok(Message::YourPeerStorageMessage(Readable::read(buffer)?)) + }, msgs::OpenChannel::TYPE => { Ok(Message::OpenChannel(Readable::read(buffer)?)) }, @@ -625,6 +637,14 @@ impl Encode for msgs::GossipTimestampFilter { const TYPE: u16 = 265; } +impl Encode for msgs::PeerStorageMessage { + const 
TYPE: u16 = 7;
+}
+
+impl Encode for msgs::YourPeerStorageMessage {
+	const TYPE: u16 = 9;
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 8ad34f2d653..7c1802e9372 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -835,6 +835,17 @@ pub trait NodeSigner {
 	/// [phantom node payments]: PhantomKeysManager
 	fn get_inbound_payment_key_material(&self) -> KeyMaterial;
 
+	/// Generates a 32-byte key used for peer storage encryption.
+	///
+	/// This function derives an encryption key for peer storage by applying HKDF
+	/// (HMAC-based key derivation) with the label `"Peer Storage Encryption Key"`
+	/// to the node secret key, returning the first 32 bytes of the derived output.
+	/// The key is used to encrypt and decrypt peer storage data.
+	///
+	/// Ensure that the node secret key is securely managed, as it is crucial for
+	/// the security of the derived encryption key.
+	fn get_peer_storage_key(&self) -> [u8; 32];
+
 	/// Get node id based on the provided [`Recipient`].
 	///
 	/// This method must return the same value each time it is called with a given [`Recipient`]
@@ -2174,6 +2185,14 @@ impl NodeSigner for KeysManager {
 		self.inbound_payment_key.clone()
 	}
 
+	fn get_peer_storage_key(&self) -> [u8; 32] {
+		let (t1, _) = hkdf_extract_expand_twice(
+			b"Peer Storage Encryption Key",
+			&self.get_node_secret_key().secret_bytes(),
+		);
+		t1
+	}
+
 	fn sign_invoice(
 		&self, invoice: &RawBolt11Invoice, recipient: Recipient,
 	) -> Result<RecoverableSignature, ()> {
@@ -2352,6 +2371,14 @@ impl NodeSigner for PhantomKeysManager {
 		self.inbound_payment_key.clone()
 	}
 
+	fn get_peer_storage_key(&self) -> [u8; 32] {
+		let (t1, _) = hkdf_extract_expand_twice(
+			b"Peer Storage Encryption Key",
+			&self.get_node_secret_key().secret_bytes(),
+		);
+		t1
+	}
+
 	fn sign_invoice(
 		&self, invoice: &RawBolt11Invoice, recipient: Recipient,
 	) -> Result<RecoverableSignature, ()> {
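The two `get_peer_storage_key` implementations above derive the key with LDK's internal `hkdf_extract_expand_twice`, passing the label as the HKDF salt and the node secret as input key material. An equivalent standalone sketch using the RustCrypto `hkdf` and `sha2` crates (the crate choice is an assumption; LDK's helper also returns a second expanded output, ignored here just as the `(t1, _)` destructuring above ignores it):

```rust
use hkdf::Hkdf;
use sha2::Sha256;

// Derives a 32-byte peer storage encryption key: HKDF-SHA256 with the label as
// salt and the node secret key as IKM, taking the first 32 bytes of output.
fn derive_peer_storage_key(node_secret: &[u8; 32]) -> [u8; 32] {
    let hk = Hkdf::<Sha256>::new(Some(b"Peer Storage Encryption Key"), node_secret);
    let mut okm = [0u8; 32];
    hk.expand(&[], &mut okm).expect("32 bytes is a valid HKDF-SHA256 output length");
    okm
}
```

Because the derivation is deterministic from the node secret, a restored node recomputes the same key and can decrypt backups returned by peers.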
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 12e027d32fc..c79723a25da 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -22,7 +22,7 @@ use crate::chain::channelmonitor::MonitorEvent;
 use crate::chain::transaction::OutPoint;
 use crate::routing::router::{CandidateRouteHop, FirstHopCandidate, PublicHopCandidate, PrivateHopCandidate};
 use crate::sign;
-use crate::events;
+use crate::events::{self, MessageSendEvent, MessageSendEventsProvider};
 use crate::events::bump_transaction::{WalletSource, Utxo};
 use crate::ln::types::ChannelId;
 use crate::ln::channel_state::ChannelDetails;
@@ -355,7 +355,7 @@ impl<'a> TestChainMonitor<'a> {
 			added_monitors: Mutex::new(Vec::new()),
 			monitor_updates: Mutex::new(new_hash_map()),
 			latest_monitor_update_id: Mutex::new(new_hash_map()),
-			chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
+			chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister, keys_manager.get_peer_storage_key()),
 			keys_manager,
 			expect_channel_force_closed: Mutex::new(None),
 			expect_monitor_round_trip_fail: Mutex::new(None),
@@ -734,6 +734,32 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster {
 	}
 }
 
+pub struct TestSendingOnlyMessageHandler {
+	pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
+}
+impl TestSendingOnlyMessageHandler {
+	pub fn new() -> Self {
+		TestSendingOnlyMessageHandler {
+			pending_events: Mutex::new(Vec::new()),
+		}
+	}
+}
+
+impl MessageSendEventsProvider for TestSendingOnlyMessageHandler {
+	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
+		let mut pending_events = self.pending_events.lock().unwrap();
+		let mut ret = Vec::new();
+		mem::swap(&mut ret, &mut *pending_events);
+		ret
+	}
+}
+impl msgs::SendingOnlyMessageHandler for TestSendingOnlyMessageHandler {
+	fn send_peer_storage(&self, their_node_id: PublicKey) {
+		self.pending_events.lock().unwrap().push(events::MessageSendEvent::SendPeerStorageMessage { node_id: their_node_id,
+			msg: msgs::PeerStorageMessage { data: Vec::new() } })
+	}
+}
+
 pub struct TestChannelMessageHandler {
 	pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
 	expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
@@ -917,6 +943,14 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 		self.received_msg(wire::Message::TxAbort(msg.clone()));
 	}
 
+	fn handle_peer_storage(&self, _their_node_id: PublicKey, msg: &msgs::PeerStorageMessage) {
+		self.received_msg(wire::Message::PeerStorageMessage(msg.clone()));
+	}
+
+	fn handle_your_peer_storage(&self, _their_node_id: PublicKey, msg: &msgs::YourPeerStorageMessage) {
+		self.received_msg(wire::Message::YourPeerStorageMessage(msg.clone()));
+	}
+
 	fn message_received(&self) {}
 }
 
@@ -1187,6 +1221,10 @@ impl NodeSigner for TestNodeSigner {
 		unreachable!()
 	}
 
+	fn get_peer_storage_key(&self) -> [u8; 32] {
+		unreachable!()
+	}
+
 	fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
 		let node_secret = match recipient {
 			Recipient::Node => Ok(&self.node_secret),
@@ -1263,6 +1301,10 @@ impl NodeSigner for TestKeysInterface {
 		self.backing.sign_invoice(invoice, recipient)
 	}
 
+	fn get_peer_storage_key(&self) -> [u8; 32] {
+		self.backing.get_peer_storage_key()
+	}
+
 	fn sign_bolt12_invoice_request(
 		&self, invoice_request: &UnsignedInvoiceRequest
 	) -> Result<schnorr::Signature, ()> {
@@ -1415,6 +1457,11 @@ impl TestChainSource {
 		self.watched_outputs.lock().unwrap().remove(&(outpoint, script_pubkey.clone()));
 		self.watched_txn.lock().unwrap().remove(&(outpoint.txid, script_pubkey));
 	}
+
+	pub fn clear_watched_txn_and_outputs(&self) {
+		self.watched_outputs.lock().unwrap().clear();
+		self.watched_txn.lock().unwrap().clear();
+	}
 }
 
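`get_and_clear_pending_msg_events` above uses the usual swap-under-lock pattern so the queue is drained atomically. A generic sketch of the same idea (`mem::take` is equivalent to the `mem::swap` with a fresh `Vec` used in the handler):

```rust
use std::mem;
use std::sync::Mutex;

// Atomically drains a pending-event queue, leaving an empty Vec behind.
fn get_and_clear<T>(pending: &Mutex<Vec<T>>) -> Vec<T> {
    let mut guard = pending.lock().unwrap();
    mem::take(&mut *guard)
}
```

 impl UtxoLookup for TestChainSource {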