From d61d698bb8e882cec995e6fe4efd80c576ff88fa Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 18 Jun 2021 20:21:42 +0000 Subject: [PATCH 01/14] Don't print file paths in fuzz logger as they can be very long --- fuzz/src/utils/test_logger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fuzz/src/utils/test_logger.rs b/fuzz/src/utils/test_logger.rs index 5046869c0a9..f8c96f99bd1 100644 --- a/fuzz/src/utils/test_logger.rs +++ b/fuzz/src/utils/test_logger.rs @@ -58,7 +58,7 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> { impl Logger for TestLogger { fn log(&self, record: &Record) { write!(LockedWriteAdapter(&self.out), - "{:<5} {} [{} : {}, {}] {}\n", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args) + "{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args) .unwrap(); } } From c5a6135edcf3c30520d6b1c8e26991feaaf87429 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 18 Jun 2021 20:21:58 +0000 Subject: [PATCH 02/14] log_debug information about network graph updates from payments --- lightning/src/routing/network_graph.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index b4448f72204..2c55d0a1d52 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -169,12 +169,16 @@ impl RoutingMessageHandler for NetGraphMsgHandler wh fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) { match update { &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => { + let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1); + log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" }); let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx); }, &msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => { + log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id); self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent); }, &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => { + log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id); self.network_graph.write().unwrap().fail_node(node_id, is_permanent); }, } From 95f9523097e9ed7535fdbfcd98cf6903d036c0f3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 21 Jun 2021 16:00:27 +0000 Subject: [PATCH 03/14] Drop rust-bitcoin crate patches in fuzz now that they're merged These patches have been merged upstream and are in releases now so we don't need to patch them locally. --- fuzz/Cargo.toml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index cbbe78271d4..7c3d42067da 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -24,12 +24,6 @@ hex = "0.3" honggfuzz = { version = "0.5", optional = true } libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true } -[patch.crates-io] -# Rust-Secp256k1 PR 282. This patch should be dropped once that is merged. 
-secp256k1 = { git = 'https://github.com/TheBlueMatt/rust-secp256k1', rev = 'c8615128097e0205dcf69e515e56fb57e5c97138' } -# bitcoin_hashes PR 111 (without the top commit). This patch should be dropped once that is merged. -bitcoin_hashes = { git = 'https://github.com/TheBlueMatt/bitcoin_hashes', rev = 'c90d26339a3e34fd2f942aa80298f410cc41b743' } - [build-dependencies] cc = "1.0" From a146ef2be28d9f4e4daeb8f88797115931824578 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 21 Jun 2021 17:36:46 +0000 Subject: [PATCH 04/14] Do not generate error messages when we receive our own gossip When a peer sends us the routing graph, it may include gossip messages for our channels, despite it not being a party to them. This is completely fine, but we currently print somewhat-scary-looking log messages in these cases, e.g.: ``` ERROR [lightning::ln::channelmanager:4104] Got a message for a channel from the wrong node! TRACE [lightning::ln::peer_handler:1267] Handling SendErrorMessage HandleError event in peer_handler for node ... with message Got a message for a channel from the wrong node! ``` Instead, we should simply not consider this an "error" condition and stay silent. --- lightning/src/ln/channelmanager.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ee266849b8b..81fb4561f18 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3354,8 +3354,13 @@ impl ChannelMana match channel_state.by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_counterparty_node_id() != *counterparty_node_id { - // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node - return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id)); + if chan.get().should_announce() { + // If the announcement is about a channel of ours which is public, some + // other peer may simply be forwarding all its gossip to us. Don't provide + // a scary-looking error message and return Ok instead. + return Ok(()); + } + return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); }, From 133e28ffe6de4491ccde71f6081efa0e76491b57 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 21 Jun 2021 18:03:19 +0000 Subject: [PATCH 05/14] Add error logs when a ChannelManager has inconsistent monitor state We had a client application which provided inconsistent monitor state when deserializing a ChannelManager, resulting in opaque and generic "InvalidData" deserialization failures. Instead, we log some informative (and appropriately scary) warning messages in such cases.
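As a minimal standalone sketch of the invariant these logs describe (plain integers stand in for the real ChannelManager/ChannelMonitor update ids, so this is an illustration rather than the actual API), a monitor supplied at deserialization must never lag the manager's view of that channel:

```
// Simplified sketch only: the update_id comparison paraphrases the check in the diff
// below; the real types are ChannelManager/ChannelMonitor, not bare integers.
fn monitor_is_current(manager_latest_update_id: u64, monitor_latest_update_id: u64) -> bool {
    // chain::Watch requires monitors to be persisted durably before returning, so a
    // lower (stale) monitor update_id means the client violated that requirement.
    monitor_latest_update_id >= manager_latest_update_id
}

fn main() {
    assert!(monitor_is_current(5, 5));
    assert!(!monitor_is_current(6, 5)); // stale monitor: deserialization now logs and fails
}
```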
--- lightning/src/ln/channelmanager.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 81fb4561f18..cddb402d987 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4647,6 +4647,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() || channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() { // If the channel is ahead of the monitor, return InvalidValue: + log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); + log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); + log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); return Err(DecodeError::InvalidValue); } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || @@ -4663,6 +4668,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> by_id.insert(channel.channel_id(), channel); } } else { + log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id())); + log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); + log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); return Err(DecodeError::InvalidValue); } } From 3ea4279d55d45bb0150b31b195e100c8ee95f5d9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 22 Jun 2021 01:30:19 +0000 Subject: [PATCH 06/14] Unify message sending to use PeerManager::enqueue_message This makes our logging consistent and somewhat simplifies message sending code in a few places. 
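A rough sketch of the shape of this refactor, using hypothetical standalone types rather than the real PeerManager internals: call sites that previously fetched the peer, encrypted, and pushed bytes themselves now funnel through one helper, which is also the single place the enqueue is logged:

```
// Hypothetical simplified types; the real code encrypts via the peer's
// channel_encryptor and logs through the Logger trait.
struct Peer {
    pending_outbound_buffer: Vec<Vec<u8>>,
}

fn enqueue_message(peer: &mut Peer, encoded_message: Vec<u8>) {
    // One consistent log site for every outbound message.
    println!("Enqueueing message of {} bytes", encoded_message.len());
    peer.pending_outbound_buffer.push(encoded_message);
}

fn main() {
    let mut peer = Peer { pending_outbound_buffer: Vec::new() };
    // Each MessageSendEvent arm now calls the same helper instead of open-coding the push.
    enqueue_message(&mut peer, vec![0u8; 32]);
    assert_eq!(peer.pending_outbound_buffer.len(), 1);
}
```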
--- lightning/src/ln/peer_handler.rs | 76 ++++++++++++-------------------- 1 file changed, 27 insertions(+), 49 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 5546227eff2..5fba871e0c4 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -549,14 +549,6 @@ impl PeerManager { - { - log_trace!(self.logger, "Encoding and sending sync update message of type {} to {}", $msg.type_id(), log_pubkey!(peer.their_node_id.unwrap())); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!($msg)[..])); - } - } - } while !peer.awaiting_write_event { if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE { match peer.sync_status { @@ -565,12 +557,12 @@ impl PeerManager PeerManager PeerManager PeerManager { log_trace!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", @@ -1175,29 +1165,25 @@ impl PeerManager { log_trace!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { log_trace!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}", @@ -1208,49 +1194,45 @@ impl PeerManager { log_trace!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { 
log_trace!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { log_trace!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { log_trace!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); @@ -1283,7 +1265,7 @@ impl PeerManager PeerManager { - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => { - let peer = get_peer_for_forwarding!(node_id); - peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg))); + self.enqueue_message(get_peer_for_forwarding!(node_id), msg); } MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => { log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", @@ -1320,8 +1299,7 @@ impl PeerManager PeerManager Date: Tue, 22 Jun 2021 03:36:36 +0000 Subject: [PATCH 07/14] More consistently log in msg handling, incl full msg logging at trace This much more consistently logs information about messages sent/received, including logging the full messages being sent/received at the TRACE log level. Many other log messages which are more often of interest were moved to the DEBUG log level. 
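The piece that makes full-message logging possible is deriving Debug on the wire message enum (see the wire.rs hunk below). A minimal standalone illustration, with a hypothetical message type standing in for the real one:

```
// Hypothetical stand-in for the wire::Message enum; deriving Debug is what lets the
// handler log the entire decoded message with {:?} at the TRACE level.
#[derive(Debug)]
enum Message {
    Ping { nonce: u64 },
    Pong { nonce: u64 },
}

fn main() {
    // Roughly what the new "Received message {:?} from {}" trace line expands to.
    println!("Received message {:?} from {}", Message::Ping { nonce: 42 }, "0266e4...");
    println!("Received message {:?} from {}", Message::Pong { nonce: 42 }, "0266e4...");
}
```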
--- lightning/src/ln/peer_handler.rs | 57 ++++++++++++++++++-------------- lightning/src/ln/wire.rs | 4 ++- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 5fba871e0c4..5fab2c6009c 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -32,6 +32,7 @@ use routing::network_graph::NetGraphMsgHandler; use prelude::*; use alloc::collections::LinkedList; +use alloc::fmt::Debug; use std::sync::{Arc, Mutex}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::{cmp, hash, fmt, mem}; @@ -670,12 +671,12 @@ impl PeerManager(&self, peer: &mut Peer, message: &M) { + fn enqueue_message(&self, peer: &mut Peer, message: &M) { let mut buffer = VecWriter(Vec::new()); wire::write(message, &mut buffer).unwrap(); // crash if the write failed let encoded_message = buffer.0; - log_trace!(self.logger, "Enqueueing message of type {} to {}", message.type_id(), log_pubkey!(peer.their_node_id.unwrap())); + log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap())); peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..])); } @@ -716,15 +717,15 @@ impl PeerManager { //TODO: Try to push msg - log_trace!(self.logger, "Got Err handling message, disconnecting peer because {}", e.err); + log_debug!(self.logger, "Got Err handling message, disconnecting peer with: {}", e.err); return Err(PeerHandleError{ no_connection_possible: false }); }, msgs::ErrorAction::IgnoreError => { - log_trace!(self.logger, "Got Err handling message, ignoring because {}", e.err); + log_debug!(self.logger, "Got ignored error handling message: {}", e.err); continue; }, msgs::ErrorAction::SendErrorMessage { msg } => { - log_trace!(self.logger, "Got Err handling message, sending Error message because {}", e.err); + log_debug!(self.logger, "Got Err handling message, sending Error message with: {}", e.err); self.enqueue_message(peer, &msg); continue; }, @@ -743,7 +744,7 @@ impl PeerManager { - log_trace!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap())); + log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap())); entry.insert(peer_descriptor.clone()) }, }; @@ -804,7 +805,7 @@ impl PeerManager return Err(PeerHandleError { no_connection_possible: false }), msgs::DecodeError::UnknownRequiredFeature => { - log_debug!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!"); + log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!"); continue; } msgs::DecodeError::InvalidValue => { @@ -818,7 +819,7 @@ impl PeerManager return Err(PeerHandleError { no_connection_possible: false }), msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }), msgs::DecodeError::UnsupportedCompression => { - log_debug!(self.logger, "We don't support zlib-compressed message fields, ignoring message"); + log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message"); continue; } } @@ -861,12 +862,12 @@ impl PeerManager Result, MessageHandlingError> { - log_trace!(self.logger, "Received message of type {} from {}", message.type_id(), log_pubkey!(peer.their_node_id.unwrap())); + log_trace!(self.logger, "Received message {:?} from {}", message, 
log_pubkey!(peer.their_node_id.unwrap())); // Need an Init as first message if let wire::Message::Init(_) = message { } else if peer.their_features.is_none() { - log_trace!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap())); + log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap())); return Err(PeerHandleError{ no_connection_possible: false }.into()); } @@ -876,7 +877,7 @@ impl PeerManager { if msg.features.requires_unknown_bits() { - log_info!(self.logger, "Peer features required unknown version bits"); + log_debug!(self.logger, "Peer features required unknown version bits"); return Err(PeerHandleError{ no_connection_possible: true }.into()); } if peer.their_features.is_some() { @@ -1043,6 +1044,7 @@ impl PeerManager, msg: &wire::Message, except_node: Option<&PublicKey>) { match msg { wire::Message::ChannelAnnouncement(ref msg) => { + log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1051,6 +1053,7 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { + log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) || @@ -1064,6 +1067,7 @@ impl PeerManager { + log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1072,6 +1076,7 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { + log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) { @@ -1084,6 +1089,7 @@ impl PeerManager { + log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); let encoded_msg = encode_msg!(msg); for (_, peer) in peers.peers.iter_mut() { @@ -1092,6 +1098,7 @@ impl PeerManager OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP { + log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); continue; } if except_node.is_some() && peer.their_node_id.as_ref() == except_node { @@ -1147,19 +1154,19 @@ impl PeerManager { - log_trace!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.temporary_channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", + log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})", log_pubkey!(node_id), 
log_bytes!(msg.temporary_channel_id), log_funding_channel_id!(msg.funding_txid, msg.funding_output_index)); @@ -1168,25 +1175,25 @@ impl PeerManager { - log_trace!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", + log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - log_trace!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}", + log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}", log_pubkey!(node_id), update_add_htlcs.len(), update_fulfill_htlcs.len(), @@ -1211,44 +1218,44 @@ impl PeerManager { - log_trace!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendShutdown { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { - log_trace!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", + log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}", log_pubkey!(node_id), log_bytes!(msg.channel_id)); self.enqueue_message(get_peer_for_forwarding!(node_id), msg); }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { - log_trace!(self.logger, "Handling BroadcastChannelAnnouncement event in 
peer_handler for short channel id {}", msg.contents.short_channel_id); + log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() { self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None); self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None); } }, MessageSendEvent::BroadcastNodeAnnouncement { msg } => { - log_trace!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); + log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler"); if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() { self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None); } }, MessageSendEvent::BroadcastChannelUpdate { msg } => { - log_trace!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); + log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id); if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() { self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None); } diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs index 58d336ba069..0ee280b50e4 100644 --- a/lightning/src/ln/wire.rs +++ b/lightning/src/ln/wire.rs @@ -24,6 +24,7 @@ use util::ser::{Readable, Writeable, Writer}; /// A Lightning message returned by [`read()`] when decoding bytes received over the wire. Each /// variant contains a message from [`msgs`] or otherwise the message type if unknown. #[allow(missing_docs)] +#[derive(Debug)] pub enum Message { Init(msgs::Init), Error(msgs::ErrorMessage), @@ -58,10 +59,11 @@ pub enum Message { } /// A number identifying a message to determine how it is encoded on the wire. -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] pub struct MessageType(u16); impl Message { + #[allow(dead_code)] // This method is only used in tests /// Returns the type that was used to decode the message payload. pub fn type_id(&self) -> MessageType { match self { From 7fa6a7d48e5cec139481ceee95baadb2cf06486e Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Jun 2021 20:38:48 +0000 Subject: [PATCH 08/14] Allow logging to specify an explicit log level instead of a macro For log entries which may have a variable level, we can't call an arbitrary macro and need to be able to pass an explicit level. This does so without breaking the compile-time disabling of certain log levels. Further, we "fix" the comparison order of log levels to make more significant levels sort "higher", which implicitly makes more sense than sorting "lower". Finally, we remove the "Off" log level as no log entry should ever be logged at the "Off" level - that would be nonsensical. 
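A minimal standalone illustration of the ordering change (it uses a derived ordering rather than the crate's hand-written PartialOrd, so it shows the principle, not the exact implementation): declaring Trace first and Error last makes more significant levels compare as greater, so a threshold check reads naturally:

```
// Illustration only: the effect of the reordered variants is the same as this
// derived ordering, where declaration order determines comparison order.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Level { Trace, Debug, Info, Warn, Error }

fn main() {
    assert!(Level::Error > Level::Warn);
    assert!(Level::Debug > Level::Trace);
    // A level filter now keeps everything at or above a threshold.
    let threshold = Level::Info;
    assert!(Level::Warn >= threshold);
    assert!(!(Level::Debug >= threshold));
}
```

The log_given_level macro in the diff below then dispatches on such a Level value at runtime, while its cfg attributes preserve the compile-time disabling of levels above the configured maximum.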
--- lightning/src/util/logger.rs | 51 +++++++++++++++++++++++------- lightning/src/util/macro_logger.rs | 39 +++++++++++++++++------ 2 files changed, 69 insertions(+), 21 deletions(-) diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index df2b7f70405..98037aac298 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -17,23 +17,21 @@ use core::cmp; use core::fmt; -static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; +static LOG_LEVEL_NAMES: [&'static str; 5] = ["TRACE", "DEBUG", "INFO", "WARN", "ERROR"]; /// An enum representing the available verbosity levels of the logger. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub enum Level { - ///Designates logger being silent - Off, - /// Designates very serious errors - Error, - /// Designates hazardous situations - Warn, - /// Designates useful information - Info, - /// Designates lower priority information - Debug, /// Designates very low priority, often extremely verbose, information Trace, + /// Designates lower priority information + Debug, + /// Designates useful information + Info, + /// Designates hazardous situations + Warn, + /// Designates very serious errors + Error, } impl PartialOrd for Level { @@ -163,4 +161,35 @@ mod tests { let wrapper = WrapperLog::new(Arc::clone(&logger)); wrapper.call_macros(); } + + #[test] + fn test_log_ordering() { + assert!(Level::Error > Level::Warn); + assert!(Level::Error >= Level::Warn); + assert!(Level::Error >= Level::Error); + assert!(Level::Warn > Level::Info); + assert!(Level::Warn >= Level::Info); + assert!(Level::Warn >= Level::Warn); + assert!(Level::Info > Level::Debug); + assert!(Level::Info >= Level::Debug); + assert!(Level::Info >= Level::Info); + assert!(Level::Debug > Level::Trace); + assert!(Level::Debug >= Level::Trace); + assert!(Level::Debug >= Level::Debug); + assert!(Level::Trace >= Level::Trace); + + assert!(Level::Error <= Level::Error); + assert!(Level::Warn < Level::Error); + assert!(Level::Warn <= Level::Error); + assert!(Level::Warn <= Level::Warn); + assert!(Level::Info < Level::Warn); + assert!(Level::Info <= Level::Warn); + assert!(Level::Info <= Level::Info); + assert!(Level::Debug < Level::Info); + assert!(Level::Debug <= Level::Info); + assert!(Level::Debug <= Level::Debug); + assert!(Level::Trace < Level::Debug); + assert!(Level::Trace <= Level::Debug); + assert!(Level::Trace <= Level::Trace); + } } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index c4630c638c7..3ac294fbff9 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -164,33 +164,53 @@ macro_rules! log_internal { ); } +/// Logs an entry at the given level. +#[macro_export] +macro_rules! 
log_given_level { + ($logger: expr, $lvl:expr, $($arg:tt)+) => ( + match $lvl { + #[cfg(not(any(feature = "max_level_off")))] + $crate::util::logger::Level::Error => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))] + $crate::util::logger::Level::Warn => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))] + $crate::util::logger::Level::Info => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))] + $crate::util::logger::Level::Debug => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))] + $crate::util::logger::Level::Trace => log_internal!($logger, $lvl, $($arg)*), + + #[cfg(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug"))] + _ => { + // The level is disabled at compile-time + }, + } + ); +} + /// Log an error. #[macro_export] macro_rules! log_error { ($logger: expr, $($arg:tt)*) => ( - #[cfg(not(any(feature = "max_level_off")))] - log_internal!($logger, $crate::util::logger::Level::Error, $($arg)*); + log_given_level!($logger, $crate::util::logger::Level::Error, $($arg)*); ) } macro_rules! log_warn { ($logger: expr, $($arg:tt)*) => ( - #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))] - log_internal!($logger, $crate::util::logger::Level::Warn, $($arg)*); + log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*); ) } macro_rules! log_info { ($logger: expr, $($arg:tt)*) => ( - #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))] - log_internal!($logger, $crate::util::logger::Level::Info, $($arg)*); + log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*); ) } macro_rules! log_debug { ($logger: expr, $($arg:tt)*) => ( - #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))] - log_internal!($logger, $crate::util::logger::Level::Debug, $($arg)*); + log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*); ) } @@ -198,7 +218,6 @@ macro_rules! log_debug { #[macro_export] macro_rules! log_trace { ($logger: expr, $($arg:tt)*) => ( - #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))] - log_internal!($logger, $crate::util::logger::Level::Trace, $($arg)*); + log_given_level!($logger, $crate::util::logger::Level::Trace, $($arg)*); ) } From 1f592b045fb6b2788c595e573412aa93f3ebc850 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 22 Jun 2021 01:33:44 +0000 Subject: [PATCH 09/14] Do not log_debug when we receive duplicate gossip messages We very often receive duplicate gossip messages, which now causes us to log at the DEBUG level, which is almost certainly not what a user wants. Instead, we add a new form of ErrorAction which causes us to only log at the TRACE level. 
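A simplified sketch of how a variable-level ignore action can be consumed by the message-handling loop, using hypothetical standalone enums that mirror the shapes in the diff below rather than the real msgs/logger types:

```
// Hypothetical simplified mirrors of ErrorAction and Level, for illustration.
#[derive(Debug)]
enum Level { Trace, Debug }

enum ErrorAction {
    IgnoreError,
    IgnoreAndLog(Level),
}

fn handle_error(action: ErrorAction, err: &str) {
    match action {
        // The message handler chose the level, e.g. Trace for duplicate gossip.
        ErrorAction::IgnoreAndLog(level) => println!("{:?}: ignoring: {}", level, err),
        ErrorAction::IgnoreError => println!("{:?}: ignoring: {}", Level::Debug, err),
    }
}

fn main() {
    handle_error(ErrorAction::IgnoreAndLog(Level::Trace), "Already have knowledge of channel");
    handle_error(ErrorAction::IgnoreError, "some other harmless error");
}
```

In the network_graph.rs hunks below, the duplicate-announcement and stale-update errors switch from IgnoreError to IgnoreAndLog(Level::Trace), which is what removes them from DEBUG output.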
--- lightning/src/ln/msgs.rs | 5 +++++ lightning/src/ln/peer_handler.rs | 19 ++++++++++++++----- lightning/src/routing/network_graph.rs | 8 ++++---- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index f02a478357b..c45e2277fb8 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -38,6 +38,7 @@ use core::fmt::Debug; use std::io::Read; use util::events::MessageSendEventsProvider; +use util::logger; use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt}; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -688,7 +689,11 @@ pub enum ErrorAction { msg: Option }, /// The peer did something harmless that we weren't able to process, just log and ignore + // New code should *not* use this. New code must use IgnoreAndLog, below! IgnoreError, + /// The peer did something harmless that we weren't able to meaningfully process. + /// If the error is logged, log it at the given level. + IgnoreAndLog(logger::Level), /// The peer did something incorrect. Tell them. SendErrorMessage { /// The message to send. diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 5fab2c6009c..78ffcfd1de5 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -27,7 +27,7 @@ use ln::wire; use ln::wire::Encode; use util::byte_utils; use util::events::{MessageSendEvent, MessageSendEventsProvider}; -use util::logger::Logger; +use util::logger::{Logger, Level}; use routing::network_graph::NetGraphMsgHandler; use prelude::*; @@ -717,15 +717,19 @@ impl PeerManager { //TODO: Try to push msg - log_debug!(self.logger, "Got Err handling message, disconnecting peer with: {}", e.err); + log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err); return Err(PeerHandleError{ no_connection_possible: false }); }, + msgs::ErrorAction::IgnoreAndLog(level) => { + log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err); + continue + }, msgs::ErrorAction::IgnoreError => { - log_debug!(self.logger, "Got ignored error handling message: {}", e.err); + log_debug!(self.logger, "Error handling message; ignoring: {}", e.err); continue; }, msgs::ErrorAction::SendErrorMessage { msg } => { - log_debug!(self.logger, "Got Err handling message, sending Error message with: {}", e.err); + log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err); self.enqueue_message(peer, &msg); continue; }, @@ -1284,7 +1288,12 @@ impl PeerManager {}, + msgs::ErrorAction::IgnoreAndLog(level) => { + log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); + }, + msgs::ErrorAction::IgnoreError => { + log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); + }, msgs::ErrorAction::SendErrorMessage { ref msg } => { log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 2c55d0a1d52..6fe8912bd16 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -28,7 +28,7 @@ use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalFie use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd}; use ln::msgs; use 
util::ser::{Writeable, Readable, Writer}; -use util::logger::Logger; +use util::logger::{Logger, Level}; use util::events::{MessageSendEvent, MessageSendEventsProvider}; use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; @@ -717,7 +717,7 @@ impl NetworkGraph { Some(node) => { if let Some(node_info) = node.announcement_info.as_ref() { if node_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } } @@ -838,7 +838,7 @@ impl NetworkGraph { Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id); *entry.get_mut() = chan_info; } else { - return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError}) + return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}) } }, BtreeEntry::Vacant(entry) => { @@ -940,7 +940,7 @@ impl NetworkGraph { ( $target: expr, $src_node: expr) => { if let Some(existing_chan_info) = $target.as_ref() { if existing_chan_info.last_update >= msg.timestamp { - return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError}); + return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)}); } chan_was_enabled = existing_chan_info.enabled; } else { From 7eff56b12f1f9cccb80e7b8b8efc69e569f89985 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 22 Jun 2021 03:35:52 +0000 Subject: [PATCH 10/14] Update logging in channel and channelmanager to better levels This updates a number of log sites in channel and channelmanager to * Be a bit more verbose at the TRACE level, * Move some error/useful messages to the ERROR/WARN/INFO level, * Add new logs to always log once at the DEBUG level when we send/receive a commitment_signed (with some extra data), * Include the channel id being operated on in more log messages. 
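As a standalone illustration of the channel-id point (a plain hex helper here, not the crate's log_bytes! macro), tagging each line with the channel id is what makes per-channel filtering of multi-channel logs practical:

```
// Simplified stand-in for formatting a [u8; 32] channel id into a log line.
fn hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
    let channel_id = [0x2au8; 32];
    // Every log line now carries the channel it refers to.
    println!("Freeing holding cell with {} HTLC updates in channel {}", 3, hex(&channel_id));
}
```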
--- lightning/src/ln/channel.rs | 108 ++++++++++++++++++--------- lightning/src/ln/channelmanager.rs | 26 ++++--- lightning/src/ln/functional_tests.rs | 13 ++-- 3 files changed, 98 insertions(+), 49 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f8c8c15be95..26dd61d686f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -909,7 +909,10 @@ impl Channel { let mut local_htlc_total_msat = 0; let mut value_to_self_msat_offset = 0; - log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", + commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), + get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), + log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! get_htlc_in_commitment { ($htlc: expr, $offered: expr) => { @@ -1291,7 +1294,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell! Current state: {}", self.channel_state); + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state); self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); @@ -1305,7 +1308,7 @@ impl Channel { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return Ok((None, Some(monitor_update))); } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill!", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } @@ -1391,7 +1394,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell", htlc_id_arg); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, @@ -1399,7 +1402,7 @@ impl Channel { return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message", htlc_id_arg); + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); { let htlc = &mut self.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); @@ -1531,7 +1534,10 @@ impl Channel { let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); // They sign 
the holder commitment transaction... - log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script)); + log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", + log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), + encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), + encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } @@ -1540,7 +1546,8 @@ impl Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; @@ -1616,6 +1623,8 @@ impl Channel { self.cur_counterparty_commitment_transaction_number -= 1; self.cur_holder_commitment_transaction_number -= 1; + log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id())); + Ok((msgs::FundingSigned { channel_id: self.channel_id, signature @@ -1644,7 +1653,8 @@ impl Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0; @@ -1686,10 +1696,12 @@ impl Channel { self.cur_holder_commitment_transaction_number -= 1; self.cur_counterparty_commitment_transaction_number -= 1; + log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); + Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap())) } - pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> { + pub fn funding_locked(&mut self, msg: &msgs::FundingLocked, logger: &L) -> Result<(), ChannelError> where L::Target: Logger { if self.channel_state & (ChannelState::PeerDisconnected as u32) 
== ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish".to_owned())); } @@ -1719,6 +1731,9 @@ impl Channel { self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point; self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); + + log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id())); + Ok(()) } @@ -2028,7 +2043,7 @@ impl Channel { if pending_remote_value_msat - msg.amount_msat - chan_reserve_msat < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation"); + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id())); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } else { @@ -2160,7 +2175,10 @@ impl Channel { let bitcoin_tx = trusted_tx.built_transaction(); let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis); - log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script)); + log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", + log_bytes!(msg.signature.serialize_compact()[..]), + log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) { return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned()))); } @@ -2207,7 +2225,9 @@ impl Channel { let htlc_tx = self.build_htlc_transaction(&commitment_txid, &htlc, true, &keys, feerate_per_kw); let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys); let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]); - log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript)); + log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", + log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), + encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id())); if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()))); } @@ -2257,6 +2277,8 @@ impl 
Channel { Some(forward_info.clone()) } else { None }; if let Some(forward_info) = new_forward { + log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", + log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_commitment = true; } @@ -2265,6 +2287,8 @@ impl Channel { if let Some(fail_reason) = if let &mut OutboundHTLCState::RemoteRemoved(ref mut fail_reason) = &mut htlc.state { Some(fail_reason.take()) } else { None } { + log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", + log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id)); htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(fail_reason); need_commitment = true; } @@ -2290,6 +2314,8 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } + log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", + log_bytes!(self.channel_id)); // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and // re-send the message on restoration) return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned()))); @@ -2309,6 +2335,9 @@ impl Channel { (None, self.maybe_propose_first_closing_signed(fee_estimator)) } else { (None, None) }; + log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", + log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" }); + Ok((msgs::RevokeAndACK { channel_id: self.channel_id, per_commitment_secret, @@ -2331,7 +2360,8 @@ impl Channel { fn free_holding_cell_htlcs(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0); if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }); + log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(), + if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id())); let mut monitor_update = ChannelMonitorUpdate { update_id: self.latest_monitor_update_id + 1, // We don't increment this yet! @@ -2357,7 +2387,8 @@ impl Channel { Err(e) => { match e { ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {}", log_bytes!(payment_hash.0), msg); + log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", + log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id())); // If we fail to send here, then this HTLC should // be failed backwards. 
Failing to send here // indicates that this HTLC may keep being put back @@ -2421,6 +2452,10 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", + log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, + update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); + Ok((Some((msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, @@ -2496,7 +2531,7 @@ impl Channel { self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); self.cur_counterparty_commitment_transaction_number -= 1; - log_trace!(logger, "Updating HTLCs on receipt of RAA..."); + log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id())); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -2619,6 +2654,7 @@ impl Channel { } self.monitor_pending_forwards.append(&mut to_forward_infos); self.monitor_pending_failures.append(&mut revoked_htlcs); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new())) } @@ -2649,6 +2685,8 @@ impl Channel { self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", + log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); Ok((Some(msgs::CommitmentUpdate { update_add_htlcs: Vec::new(), update_fulfill_htlcs: Vec::new(), @@ -2658,6 +2696,7 @@ impl Channel { commitment_signed }), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail)) } else { + log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail)) } } @@ -2756,7 +2795,7 @@ impl Channel { } self.channel_state |= ChannelState::PeerDisconnected as u32; - log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); + log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); } /// Indicates that a ChannelMonitor update failed to be stored by the client and further @@ -2823,10 +2862,9 @@ impl Channel { self.monitor_pending_revoke_and_ack = false; self.monitor_pending_commitment_signed = false; let order = self.resend_order.clone(); - log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first", - if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, - if commitment_update.is_some() { "a" } else { "no" }, - if raa.is_some() { "an" } else { "no" }, + log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", + log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, + if 
commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); (raa, commitment_update, order, forwards, failures, funding_broadcastable, funding_locked) } @@ -2904,8 +2942,8 @@ impl Channel { } } - log_trace!(logger, "Regenerated latest commitment update with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); + log_trace!(logger, "Regenerated latest commitment update in channel {} with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + log_bytes!(self.channel_id()), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee: None, @@ -3590,12 +3628,13 @@ impl Channel { // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). if let Some(funding_locked) = self.check_get_funding_locked(height) { + log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id)); return Ok(Some(funding_locked)); } } for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { - log_trace!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); + log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id())); return Err(msgs::ErrorMessage { channel_id: self.channel_id(), data: "Commitment or closing transaction was confirmed on chain.".to_owned() @@ -3618,7 +3657,8 @@ impl Channel { /// /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. - pub fn best_block_updated(&mut self, height: u32, highest_header_time: u32) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> { + pub fn best_block_updated(&mut self, height: u32, highest_header_time: u32, logger: &L) + -> Result<(Option, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger { let mut timed_out_htlcs = Vec::new(); let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER; self.holding_cell_htlc_updates.retain(|htlc_update| { @@ -3636,6 +3676,7 @@ impl Channel { self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time); if let Some(funding_locked) = self.check_get_funding_locked(height) { + log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id)); return Ok((Some(funding_locked), timed_out_htlcs)); } @@ -3668,7 +3709,7 @@ impl Channel { /// Indicates the funding transaction is no longer confirmed in the main chain. This may /// force-close the channel, but may also indicate a harmless reorganization of a block or two /// before the channel has reached funding_locked and we can just wait for more blocks. 
- pub fn funding_transaction_unconfirmed(&mut self) -> Result<(), msgs::ErrorMessage> { + pub fn funding_transaction_unconfirmed(&mut self, logger: &L) -> Result<(), msgs::ErrorMessage> where L::Target: Logger { if self.funding_tx_confirmation_height != 0 { // We handle the funding disconnection by calling best_block_updated with a height one // below where our funding was connected, implying a reorg back to conf_height - 1. @@ -3677,7 +3718,7 @@ impl Channel { // larger. If we don't know that time has moved forward, we can just set it to the last // time we saw and it will be ignored. let best_time = self.update_time_counter; - match self.best_block_updated(reorg_height, best_time) { + match self.best_block_updated(reorg_height, best_time, logger) { Ok((funding_locked, timed_out_htlcs)) => { assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?"); assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); @@ -3933,13 +3974,13 @@ impl Channel { let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret)); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); OptionalField::Present(DataLossProtect { your_last_per_commitment_secret: remote_last_secret, my_current_per_commitment_point: dummy_pubkey }) } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret"); + log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); OptionalField::Present(DataLossProtect { your_last_per_commitment_secret: [0;32], my_current_per_commitment_point: dummy_pubkey, @@ -4206,18 +4247,17 @@ impl Channel { signature = res.0; htlc_signatures = res.1; - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}", + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&counterparty_commitment_tx.0.trust().built_transaction().transaction), - &counterparty_commitment_txid, - encode::serialize_hex(&self.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..])); + &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()), + log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id())); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}", + log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), - 
log_bytes!(htlc_sig.serialize_compact()[..])); + log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id())); } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index cddb402d987..84e500c54d6 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -776,7 +776,7 @@ macro_rules! convert_chan_err { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, ChannelError::Close(msg) => { - log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); + log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); if let Some(short_id) = $channel.get_short_channel_id() { $short_to_id.remove(&short_id); } @@ -1243,7 +1243,7 @@ impl ChannelMana #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; - log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len()); + log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } @@ -1274,7 +1274,7 @@ impl ChannelMana return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); } }; - log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); @@ -1671,6 +1671,7 @@ impl ChannelMana return Err(APIError::MonitorUpdateFailed); } + log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { @@ -2060,7 +2061,7 @@ impl ChannelMana onion_packet, .. 
}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, prev_funding_outpoint } => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id); + log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, outpoint: prev_funding_outpoint, @@ -2100,11 +2101,11 @@ impl ChannelMana panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); }, HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id); + log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) { Err(e) => { if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg); + log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); } else { panic!("Stated return value requirements in get_update_fail_htlc() were not met"); } @@ -2158,6 +2159,8 @@ impl ChannelMana handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; } + log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", + add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -2664,6 +2667,8 @@ impl ChannelMana } } if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Caliming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -2927,7 +2932,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan); + try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan); if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) { log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id())); // If we see locking block before receiving remote funding_locked, we broadcast our @@ -3433,6 +3438,7 @@ impl ChannelMana if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { unimplemented!(); } + log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); 
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), updates: msgs::CommitmentUpdate { @@ -3809,7 +3815,7 @@ where *best_block = BestBlock::new(header.prev_blockhash, new_height) } - self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time)); + self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger)); } } @@ -3845,7 +3851,7 @@ where *self.best_block.write().unwrap() = BestBlock::new(block_hash, height); - self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time)); + self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger)); macro_rules! max_time { ($timestamp: expr) => { @@ -3887,7 +3893,7 @@ where self.do_chain_event(None, |channel| { if let Some(funding_txo) = channel.get_funding_txo() { if funding_txo.txid == *txid { - channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new())) + channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new())) } else { Ok((None, Vec::new())) } } else { Ok((None, Vec::new())) } }); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 6298f654965..04490656e48 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1694,7 +1694,8 @@ fn test_fee_spike_violation_fails_htlc() { }, _ => panic!("Unexpected event"), }; - nodes[1].logger.assert_log("lightning::ln::channel".to_string(), "Attempting to fail HTLC due to fee spike buffer violation".to_string(), 1); + nodes[1].logger.assert_log("lightning::ln::channel".to_string(), + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1); check_added_monitors!(nodes[1], 2); } @@ -6234,8 +6235,9 @@ fn test_fail_holding_cell_htlc_upon_free() { // us to surface its failure to the user. chan_stat = get_channel_value_stat!(nodes[0], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 1 HTLC updates".to_string(), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", log_bytes!(our_payment_hash.0), chan_stat.channel_reserve_msat); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); + let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", + hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the payment failed to be sent out. @@ -6314,8 +6316,9 @@ fn test_free_and_fail_holding_cell_htlcs() { // to surface its failure to the user. The first payment should succeed. 
chan_stat = get_channel_value_stat!(nodes[0], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 2 HTLC updates".to_string(), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", log_bytes!(payment_hash_2.0), chan_stat.channel_reserve_msat); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); + let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", + hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the second payment failed to be sent out. From 76ea83463b652df3694ab5ab4c13ec87bbc1910c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 23 Jun 2021 03:24:31 +0000 Subject: [PATCH 11/14] Add additional TRACE-level logging during pathfinding in router --- lightning/src/routing/router.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 0edeef0c3cb..06bf581c545 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -168,7 +168,7 @@ struct DummyDirectionalChannelInfo { /// so that we can choose cheaper paths (as per Dijkstra's algorithm). /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees. /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer". -#[derive(Clone)] +#[derive(Clone, Debug)] struct PathBuildingHop<'a> { // The RouteHintHop fields which will eventually be used if this hop is used in a final Route. // Note that node_features is calculated separately after our initial graph walk. @@ -506,6 +506,8 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye // - when we want to stop looking for new paths. let mut already_collected_value_msat = 0; + log_trace!(logger, "Building path from {} (payee) to {} (us/payer) for value {} msat.", payee, our_node_id, final_value_msat); + macro_rules! add_entry { // Adds entry which goes from $src_node_id to $dest_node_id // over the channel with id $chan_id with fees described in @@ -891,6 +893,8 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye } } + log_trace!(logger, "Starting main path collection loop with {} nodes pre-filled from first/last hops.", targets.len()); + // At this point, targets are filled with the data from first and // last hops communicated by the caller, and the payment receiver. 
let mut found_new_path = false; @@ -954,6 +958,9 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye ordered_hops.last_mut().unwrap().0.hop_use_fee_msat = 0; ordered_hops.last_mut().unwrap().0.cltv_expiry_delta = final_cltv; + log_trace!(logger, "Found a path back to us from the target with {} hops contributing up to {} msat: {:?}", + ordered_hops.len(), value_contribution_msat, ordered_hops); + let mut payment_path = PaymentPath {hops: ordered_hops}; // We could have possibly constructed a slightly inconsistent path: since we reduce @@ -989,8 +996,9 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye // If we weren't capped by hitting a liquidity limit on a channel in the path, // we'll probably end up picking the same path again on the next iteration. // Decrease the available liquidity of a hop in the middle of the path. - let victim_liquidity = bookkeeped_channels_liquidity_available_msat.get_mut( - &payment_path.hops[(payment_path.hops.len() - 1) / 2].0.short_channel_id).unwrap(); + let victim_scid = payment_path.hops[(payment_path.hops.len() - 1) / 2].0.short_channel_id; + log_trace!(logger, "Disabling channel {} for future path building iterations to avoid duplicates.", victim_scid); + let victim_liquidity = bookkeeped_channels_liquidity_available_msat.get_mut(&victim_scid).unwrap(); *victim_liquidity = 0; } @@ -1032,6 +1040,8 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye // In the latter case, making another path finding attempt won't help, // because we deterministically terminated the search due to low liquidity. if already_collected_value_msat >= recommended_value_msat || !found_new_path { + log_trace!(logger, "Have now collected {} msat (seeking {} msat) in paths. Last path loop {} a new path.", + already_collected_value_msat, recommended_value_msat, if found_new_path { "found" } else { "did not find" }); break 'paths_collection; } else if found_new_path && already_collected_value_msat == final_value_msat && payment_paths.len() == 1 { // Further, if this was our first walk of the graph, and we weren't limited by an @@ -1040,8 +1050,10 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye // potentially allowing us to pay fees to meet the htlc_minimum on the new path while // still keeping a lower total fee than this path. if !hit_minimum_limit { + log_trace!(logger, "Collected exactly our payment amount on the first pass, without hitting an htlc_minimum_msat limit, exiting."); break 'paths_collection; } + log_trace!(logger, "Collected our payment amount on the first pass, but running again to collect extra paths with a potentially higher limit."); path_value_msat = recommended_value_msat; } } @@ -1152,7 +1164,7 @@ pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, paye } let route = Route { paths: selected_paths }; - log_trace!(logger, "Got route: {}", log_route!(route)); + log_info!(logger, "Got route to {}: {}", payee, log_route!(route)); Ok(route) } From 74717d390c3ac654978d1a22123d9085729529cd Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 23 Jun 2021 03:32:32 +0000 Subject: [PATCH 12/14] Increase the log level of several channelmonitor/onchain logs. ChannelMonitor and related log entries can generally lean towards being higher log levels than they necessarily need to be, as they should be exceedingly rare, if only because they require confirmation of an on-chain transaction. 
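
For reference, a minimal sketch (not part of this change) of the kind of application-side `Logger` these level bumps are aimed at: once monitor and on-chain events are emitted at INFO or above, an implementation can discard TRACE/DEBUG output entirely without hiding them. The `StdoutLogger` name and the output format below are illustrative assumptions; only the `lightning::util::logger` types are from the library.

```
use lightning::util::logger::{Level, Logger, Record};

// Illustrative only: keep INFO and above, drop the chatty levels.
struct StdoutLogger;

impl Logger for StdoutLogger {
	fn log(&self, record: &Record) {
		// Monitor/on-chain events now land at INFO or higher, so filtering out
		// TRACE and DEBUG no longer hides them.
		match record.level {
			Level::Trace | Level::Debug => return,
			_ => {},
		}
		println!("{:<5} [{} : {}] {}",
			record.level.to_string(), record.module_path, record.line, record.args);
	}
}
```

Such a logger would be handed to LDK through any `L: Deref` parameter whose target is `Logger` (e.g. wrapped in an `Arc`).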
--- lightning/src/chain/channelmonitor.rs | 22 +++++++++++----------- lightning/src/chain/onchaintx.rs | 16 ++++++++-------- lightning/src/chain/package.rs | 12 ++++++------ lightning/src/ln/channelmanager.rs | 2 +- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 0bd212705d5..eba96ea6a5c 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1543,7 +1543,7 @@ impl ChannelMonitorImpl { // Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours // We're definitely a counterparty commitment transaction! - log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); + log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len()); for (idx, outp) in tx.output.iter().enumerate() { watch_outputs.push((idx as u32, outp.clone())); } @@ -1599,7 +1599,7 @@ impl ChannelMonitorImpl { } self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number); - log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid); + log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid); macro_rules! check_htlc_fails { ($txid: expr, $commitment_tx: expr, $id: tt) => { @@ -1717,7 +1717,7 @@ impl ChannelMonitorImpl { let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret)); let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key); - log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0); + log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, 0); let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, tx.output[0].value, self.counterparty_tx_cache.on_counterparty_tx_csv); let justice_package = PackageTemplate::build_package(htlc_txid, 0, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height); let claimable_outpoints = vec!(justice_package); @@ -1808,14 +1808,14 @@ impl ChannelMonitorImpl { if self.current_holder_commitment_tx.txid == commitment_txid { is_holder_tx = true; - log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim"); + log_info!(logger, "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid); let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx); append_onchain_update!(res, to_watch); } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx { if holder_tx.txid == commitment_txid { is_holder_tx = true; - log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim"); + log_info!(logger, "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to 
claim", commitment_txid); let res = self.get_broadcasted_holder_claims(holder_tx, height); let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx); append_onchain_update!(res, to_watch); @@ -1845,7 +1845,7 @@ impl ChannelMonitorImpl { } pub fn get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { - log_trace!(logger, "Getting signed latest holder commitment transaction!"); + log_debug!(logger, "Getting signed latest holder commitment transaction!"); self.holder_tx_signed = true; let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript); let txid = commitment_tx.txid(); @@ -1879,7 +1879,7 @@ impl ChannelMonitorImpl { #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))] /// Note that this includes possibly-locktimed-in-the-future transactions! fn unsafe_get_latest_holder_commitment_txn(&mut self, logger: &L) -> Vec where L::Target: Logger { - log_trace!(logger, "Getting signed copy of latest holder commitment transaction!"); + log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript); let txid = commitment_tx.txid(); let mut holder_transactions = vec![commitment_tx]; @@ -2086,7 +2086,7 @@ impl ChannelMonitorImpl { matured_htlcs.push(source.clone()); } - log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0)); + log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0)); self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { payment_hash: payment_hash, payment_preimage: None, @@ -2094,7 +2094,7 @@ impl ChannelMonitorImpl { })); }, OnchainEvent::MaturingOutput { descriptor } => { - log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); + log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor] }); @@ -2398,7 +2398,7 @@ impl ChannelMonitorImpl { height, event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash }, }; - log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), entry.confirmation_threshold()); + log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } @@ -2463,7 +2463,7 @@ impl ChannelMonitorImpl { height: height, event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() }, }; - log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), entry.confirmation_threshold()); + log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold()); self.onchain_events_awaiting_threshold_conf.push(entry); } } diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 30493eb56c6..cb34c59b399 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -382,7 +382,7 @@ impl OnchainTxHandler { F::Target: FeeEstimator, L::Target: Logger, { - log_trace!(logger, "Updating claims view at 
height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len()); + log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len()); let mut preprocessed_requests = Vec::with_capacity(requests.len()); let mut aggregated_request = None; @@ -391,20 +391,20 @@ impl OnchainTxHandler { for req in requests { // Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { - log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); + log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() .find(|locked_package| locked_package.outpoints() == req.outpoints()); if let Some(package) = timelocked_equivalent_package { - log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", + log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock()); continue; } if req.package_timelock() > height + 1 { - log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height); + log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height); for outpoint in req.outpoints() { - log_debug!(logger, " Outpoint {}", outpoint); + log_info!(logger, " Outpoint {}", outpoint); } self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req); continue; @@ -441,11 +441,11 @@ impl OnchainTxHandler { req.set_feerate(new_feerate); let txid = tx.txid(); for k in req.outpoints() { - log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout); + log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout); self.claimable_outpoints.insert(k.clone(), (txid, height)); } self.pending_claim_requests.insert(txid, req); - log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx)); + log_info!(logger, "Broadcasting onchain {}", log_tx!(tx)); broadcaster.broadcast_transaction(&tx); } } @@ -562,7 +562,7 @@ impl OnchainTxHandler { log_trace!(logger, "Bumping {} candidates", bump_candidates.len()); for (first_claim_txid, request) in bump_candidates.iter() { if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) { - log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx)); + log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx)); broadcaster.broadcast_transaction(&bump_tx); if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) { request.set_timer(new_timer); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index bd983ecd916..bda924d5565 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -603,18 +603,18 @@ impl PackageTemplate { }); } for (i, (outpoint, out)) in self.inputs.iter().enumerate() { - 
log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout); + log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout); if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; } } - log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid()); + log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid()); return Some(bumped_tx); }, PackageMalleability::Untractable => { debug_assert_eq!(value, 0, "value is ignored for non-malleable packages, should be zero to ensure callsites are correct"); if let Some((outpoint, outp)) = self.inputs.first() { if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) { - log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout); - log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid()); + log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout); + log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid()); return Some(final_tx); } return None; @@ -794,13 +794,13 @@ fn feerate_bump(predicted_weight: usize, input_amounts: u64, // ...else just increase the previous feerate by 25% (because that's a nice number) let new_fee = previous_feerate * (predicted_weight as u64) / 750; if input_amounts <= new_fee { - log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts); + log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts); return None; } new_fee } } else { - log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts); + log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts); return None; }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 84e500c54d6..a9e714cea7b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2667,7 +2667,7 @@ impl ChannelMana } } if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Caliming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get().get_counterparty_node_id(), From 6d446a6249b7693bad54cf8168c8ee094be35534 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 27 Jun 2021 03:20:36 +0000 Subject: [PATCH 13/14] Correct inbound HTLC upgrade logs on revoke_and_ack receipt --- lightning/src/ln/channel.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 26dd61d686f..06308b01cee 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2568,10 +2568,8 @@ impl Channel { }); for htlc in pending_inbound_htlcs.iter_mut() { let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to Committed", log_bytes!(htlc.payment_hash.0)); true } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to 
Committed", log_bytes!(htlc.payment_hash.0)); true } else { false }; if swap { @@ -2579,11 +2577,13 @@ impl Channel { mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); require_commitment = true; } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { match forward_info { PendingHTLCStatus::Fail(fail_msg) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); require_commitment = true; match fail_msg { HTLCFailureMsg::Relay(msg) => { @@ -2597,6 +2597,7 @@ impl Channel { } }, PendingHTLCStatus::Forward(forward_info) => { + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); to_forward_infos.push((forward_info, htlc.htlc_id)); htlc.state = InboundHTLCState::Committed; } From 6d98aedaf5dba6565839aa1b806ed730e0c7935c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 27 Jun 2021 22:02:15 +0000 Subject: [PATCH 14/14] Add debug log when we stop tracking confirmed on-chain packages --- lightning/src/chain/onchaintx.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index cb34c59b399..19bbed58b73 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -536,11 +536,14 @@ impl OnchainTxHandler { // been aggregated in a single tx and claimed so atomically if let Some(request) = self.pending_claim_requests.remove(&claim_request) { for outpoint in request.outpoints() { + log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request); self.claimable_outpoints.remove(&outpoint); } } }, OnchainEvent::ContentiousOutpoint { package } => { + log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:"); + log_debug!(logger, " {:?}", package.outpoints()); self.claimable_outpoints.remove(&package.outpoints()[0]); } }