From 2e02aa7dab1228863b602b7db7b67d8bd7dbee9d Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 17 Aug 2021 00:03:31 +0000
Subject: [PATCH 1/4] Ensure we always at least log at TRACE when a peer
 disconnects

---
 lightning/src/ln/peer_handler.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 42b2694111f..234b509564d 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -667,6 +667,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<
 			Ok(res) => Ok(res),
 			Err(e) => {
+				log_trace!(self.logger, "Peer sent invalid data or we decided to disconnect due to a protocol error");
 				self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
 				Err(e)
 			}
@@ -1344,6 +1345,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<
 			Some(peer) => {
 				match peer.their_node_id {
 					Some(node_id) => {
+						log_trace!(self.logger,
+							"Handling disconnection of peer {}, with {}future connection to the peer possible.",
+							log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
 						peers.node_id_to_descriptor.remove(&node_id);
 						self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
 					},

From ecf1d656ef4b48a155797e5825359ba2bb670d19 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 17 Aug 2021 00:04:34 +0000
Subject: [PATCH 2/4] Stop printing to stderr in lightning-net-tokio for
 disconnections

It isn't exactly a critical error situation when we disconnect a socket,
so we shouldn't be printing to stderr, entirely bypassing user logging,
when it happens.

We do still print to stderr if we fail to write the first message to the
socket, but this should never happen on a reasonably-configured system
with at least one packet's worth of space available in the socket buffer.
---
 lightning-net-tokio/src/lib.rs | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 5f5fece0d2e..d2ee100281e 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -141,30 +141,23 @@ impl Connection {
 			PeerDisconnected
 		}
 		let disconnect_type = loop {
-			macro_rules! shutdown_socket {
-				($err: expr, $need_disconnect: expr) => { {
-					println!("Disconnecting peer due to {}!", $err);
-					break $need_disconnect;
-				} }
-			}
-
 			let read_paused = {
 				let us_lock = us.lock().unwrap();
 				if us_lock.rl_requested_disconnect {
-					shutdown_socket!("disconnect_socket() call from RL", Disconnect::CloseConnection);
+					break Disconnect::CloseConnection;
 				}
 				us_lock.read_paused
 			};
 			tokio::select! {
 				v = write_avail_receiver.recv() => {
 					assert!(v.is_some()); // We can't have dropped the sending end, its in the us Arc!
-					if let Err(e) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
-						shutdown_socket!(e, Disconnect::CloseConnection);
+					if let Err(_) = peer_manager.write_buffer_space_avail(&mut our_descriptor) {
+						break Disconnect::CloseConnection;
 					}
 				},
 				_ = read_wake_receiver.recv() => {},
 				read = reader.read(&mut buf), if !read_paused => match read {
-					Ok(0) => shutdown_socket!("Connection closed", Disconnect::PeerDisconnected),
+					Ok(0) => break Disconnect::PeerDisconnected,
 					Ok(len) => {
 						let read_res = peer_manager.read_event(&mut our_descriptor, &buf[0..len]);
 						let mut us_lock = us.lock().unwrap();
@@ -174,10 +167,10 @@ impl Connection {
 									us_lock.read_paused = true;
 								}
 							},
-							Err(e) => shutdown_socket!(e, Disconnect::CloseConnection),
+							Err(_) => break Disconnect::CloseConnection,
 						}
 					},
-					Err(e) => shutdown_socket!(e, Disconnect::PeerDisconnected),
+					Err(_) => break Disconnect::PeerDisconnected,
 				},
 			}
 			peer_manager.process_events();
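[Review note] The two patches above route disconnect reporting through the user's `Logger` instead of printing to stdout/stderr. As a rough illustration of where those records now surface, here is a minimal sketch of a `Logger` implementation; the `StdoutLogger` type and its filtering are hypothetical, but `Level`, `Logger`, and `Record` (with public `level`, `module_path`, `line`, and `args` fields, and `Level` deriving ordering) follow the `lightning::util::logger` API of this era:

```rust
use lightning::util::logger::{Level, Logger, Record};

/// Hypothetical logger that prints every record at or above a minimum level.
struct StdoutLogger {
	min_level: Level,
}

impl Logger for StdoutLogger {
	fn log(&self, record: &Record) {
		// With PATCH 1/4 applied, peer disconnections always produce a TRACE
		// record here; with PATCH 2/4, lightning-net-tokio no longer bypasses
		// this path by printing disconnect reasons directly to stderr.
		if record.level >= self.min_level {
			println!("{:?} [{}:{}] {}", record.level, record.module_path, record.line, record.args);
		}
	}
}
```

A node that wants to see the new disconnect lines would construct its `PeerManager` with something like `StdoutLogger { min_level: Level::Trace }`.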
From 1e7d4798eb531ae3255b2fb82f07af81b56019a3 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 17 Aug 2021 00:46:21 +0000
Subject: [PATCH 3/4] Log chain calls in ChainMonitor, reducing logs in
 ChannelMonitor

For users with many ChannelMonitors, we log a large volume per block
simply because each ChannelMonitor logs several times per block.
Instead, we now log only once, at the TRACE level, per block-related
call in each ChannelMonitor, relying instead on a DEBUG-level log in
ChainMonitor before we call into any ChannelMonitor functions.

For most users, this will reduce redundant logging and move block
events to the DEBUG level, which is more appropriate.

Fixes #980.
---
 lightning/src/chain/chainmonitor.rs   | 5 +++++
 lightning/src/chain/channelmonitor.rs | 4 +---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 12bfebbc4ae..8969427a0f9 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -162,6 +162,7 @@ where
 	fn block_connected(&self, block: &Block, height: u32) {
 		let header = &block.header;
 		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+		log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
 		self.process_chain_data(header, &txdata, |monitor, txdata| {
 			monitor.block_connected(
 				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
@@ -170,6 +171,7 @@ where
 
 	fn block_disconnected(&self, header: &BlockHeader, height: u32) {
 		let monitors = self.monitors.read().unwrap();
+		log_debug!(self.logger, "Latest block {} at height {} removed via block_disconnected", header.block_hash(), height);
 		for monitor in monitors.values() {
 			monitor.block_disconnected(
 				header, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
@@ -187,6 +189,7 @@ where
 	P::Target: channelmonitor::Persist<ChannelSigner>,
 {
 	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
 		self.process_chain_data(header, txdata, |monitor, txdata| {
 			monitor.transactions_confirmed(
 				header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger)
@@ -194,6 +197,7 @@ where
 	}
 
 	fn transaction_unconfirmed(&self, txid: &Txid) {
+		log_debug!(self.logger, "Transaction {} reorganized out of chain", txid);
 		let monitors = self.monitors.read().unwrap();
 		for monitor in monitors.values() {
 			monitor.transaction_unconfirmed(txid,
 				&*self.broadcaster, &*self.fee_estimator, &*self.logger);
@@ -201,6 +205,7 @@ where
 	}
 
 	fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+		log_debug!(self.logger, "New best block {} at height {} provided via best_block_updated", header.block_hash(), height);
 		self.process_chain_data(header, &[], |monitor, txdata| {
 			// While in practice there shouldn't be any recursive calls when given empty txdata,
 			// it's still possible if a chain::Filter implementation returns a transaction.
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 9c381a35030..dc009c4d52c 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1913,7 +1913,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		L::Target: Logger,
 	{
 		let block_hash = header.block_hash();
-		log_trace!(logger, "New best block {} at height {}", block_hash, height);
 
 		self.best_block = BestBlock::new(block_hash, height);
 		self.transactions_confirmed(header, txdata, height, broadcaster, fee_estimator, logger)
@@ -1933,7 +1932,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		L::Target: Logger,
 	{
 		let block_hash = header.block_hash();
-		log_trace!(logger, "New best block {} at height {}", block_hash, height);
 
 		if height > self.best_block.height() {
 			self.best_block = BestBlock::new(block_hash, height);
@@ -1971,7 +1969,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		}
 
 		let block_hash = header.block_hash();
-		log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
 
 		let mut watch_outputs = Vec::new();
 		let mut claimable_outpoints = Vec::new();
@@ -2044,6 +2041,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
+		log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
 		debug_assert!(self.best_block.height() >= conf_height);
 
 		let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
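[Review note] To see the effect of PATCH 3/4, consider a node feeding blocks to the `ChainMonitor` through the `chain::Listen` interface. A minimal sketch, assuming only the `Listen::block_connected` signature visible in the diff above; the `on_new_block` helper itself is hypothetical glue:

```rust
use bitcoin::Block;
use lightning::chain::Listen;

/// Hypothetical glue called by block-fetching code whenever the chain tip advances.
fn on_new_block<C: Listen>(chain_listener: &C, block: &Block, height: u32) {
	// After this patch, a ChainMonitor here emits one DEBUG line
	// ("New best block ... provided via block_connected") before fanning out,
	// and each ChannelMonitor logs at most one TRACE line for its matched
	// transactions, rather than several TRACE lines per monitor per block.
	chain_listener.block_connected(block, height);
}
```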
From 35d0b7aae73760df48f814db6ce4b72e8ee5523a Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 17 Aug 2021 19:47:50 +0000
Subject: [PATCH 4/4] Clarify docs on ShutdownScript::new_witness_program
 somewhat

This is especially important for C and other language-bindings clients,
as the `version` field may be exported as a `u8`.
---
 lightning/src/ln/script.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/script.rs b/lightning/src/ln/script.rs
index 3abb334ce47..4e81d76ad67 100644
--- a/lightning/src/ln/script.rs
+++ b/lightning/src/ln/script.rs
@@ -86,7 +86,10 @@ impl ShutdownScript {
 		Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wsh(script_hash)))
 	}
 
-	/// Generates a P2WSH script pubkey from the given segwit version and program.
+	/// Generates a witness script pubkey from the given segwit version and program.
+	///
+	/// Note that for version-zero witness scripts you must use
+	/// [`ShutdownScript::new_p2wpkh`] or [`ShutdownScript::new_p2wsh`] instead.
 	///
 	/// # Errors
 	///
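[Review note] The doc clarification distinguishes the constructors by segwit version. A hedged usage sketch: `new_witness_program` taking a `bech32::u5` version and returning a `Result` is assumed from this era of the crate's API (along with bitcoin's `bech32` re-export), while the helper function and its error mapping are hypothetical:

```rust
use bitcoin::bech32::u5;
use lightning::ln::script::ShutdownScript;

/// Hypothetical helper: build a shutdown script for a v1 (taproot-style)
/// witness program. Version-zero programs must instead go through
/// ShutdownScript::new_p2wpkh or ShutdownScript::new_p2wsh, per the new docs.
fn v1_shutdown_script(program: &[u8; 32]) -> Result<ShutdownScript, ()> {
	// u5 cannot represent values above 31, so the witness version is
	// validated at the type level here, unlike a plain u8 in bindings.
	let version = u5::try_from_u8(1).map_err(|_| ())?;
	ShutdownScript::new_witness_program(version, program).map_err(|_| ())
}
```

Bindings that surface `version` as a plain `u8` (as the commit message notes for C) cannot rely on the type system to rule out version zero, hence the explicit doc note.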