Skip to content

Cleanup logging #965

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 14 commits into from
Jun 29, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions fuzz/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,6 @@ hex = "0.3"
honggfuzz = { version = "0.5", optional = true }
libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true }

[patch.crates-io]
# Rust-Secp256k1 PR 282. This patch should be dropped once that is merged.
secp256k1 = { git = 'https://github.com/TheBlueMatt/rust-secp256k1', rev = 'c8615128097e0205dcf69e515e56fb57e5c97138' }
# bitcoin_hashes PR 111 (without the top commit). This patch should be dropped once that is merged.
bitcoin_hashes = { git = 'https://github.com/TheBlueMatt/bitcoin_hashes', rev = 'c90d26339a3e34fd2f942aa80298f410cc41b743' }

[build-dependencies]
cc = "1.0"

Expand Down
2 changes: 1 addition & 1 deletion fuzz/src/utils/test_logger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ impl<'a, Out: Output> Write for LockedWriteAdapter<'a, Out> {
impl<Out: Output> Logger for TestLogger<Out> {
fn log(&self, record: &Record) {
write!(LockedWriteAdapter(&self.out),
"{:<5} {} [{} : {}, {}] {}\n", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args)
"{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args)
.unwrap();
}
}
22 changes: 11 additions & 11 deletions lightning/src/chain/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1543,7 +1543,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
// Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken
if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
// We're definitely a counterparty commitment transaction!
log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
for (idx, outp) in tx.output.iter().enumerate() {
watch_outputs.push((idx as u32, outp.clone()));
}
Expand Down Expand Up @@ -1599,7 +1599,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
}
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);

log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);

macro_rules! check_htlc_fails {
($txid: expr, $commitment_tx: expr, $id: tt) => {
Expand Down Expand Up @@ -1717,7 +1717,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);

log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, 0);
let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, tx.output[0].value, self.counterparty_tx_cache.on_counterparty_tx_csv);
let justice_package = PackageTemplate::build_package(htlc_txid, 0, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height);
let claimable_outpoints = vec!(justice_package);
Expand Down Expand Up @@ -1808,14 +1808,14 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {

if self.current_holder_commitment_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
log_info!(logger, "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
log_info!(logger, "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
Expand Down Expand Up @@ -1845,7 +1845,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
}

pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
log_trace!(logger, "Getting signed latest holder commitment transaction!");
log_debug!(logger, "Getting signed latest holder commitment transaction!");
self.holder_tx_signed = true;
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
Expand Down Expand Up @@ -1879,7 +1879,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
/// Note that this includes possibly-locktimed-in-the-future transactions!
fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
let mut holder_transactions = vec![commitment_tx];
Expand Down Expand Up @@ -2086,15 +2086,15 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
matured_htlcs.push(source.clone());
}

log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
payment_hash: payment_hash,
payment_preimage: None,
source: source.clone(),
}));
},
OnchainEvent::MaturingOutput { descriptor } => {
log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
Expand Down Expand Up @@ -2398,7 +2398,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
height,
event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
};
log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
Expand Down Expand Up @@ -2463,7 +2463,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
height: height,
event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
};
log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), entry.confirmation_threshold());
log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
Expand Down
19 changes: 11 additions & 8 deletions lightning/src/chain/onchaintx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -382,7 +382,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
F::Target: FeeEstimator,
L::Target: Logger,
{
log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;

Expand All @@ -391,20 +391,20 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
for req in requests {
// Don't claim an outpoint twice; that would be bad for privacy and may uselessly lock a CPFP input for a while
if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
} else {
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
continue;
}

if req.package_timelock() > height + 1 {
log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
for outpoint in req.outpoints() {
log_debug!(logger, " Outpoint {}", outpoint);
log_info!(logger, " Outpoint {}", outpoint);
}
self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
continue;
Expand Down Expand Up @@ -441,11 +441,11 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
req.set_feerate(new_feerate);
let txid = tx.txid();
for k in req.outpoints() {
log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
self.claimable_outpoints.insert(k.clone(), (txid, height));
}
self.pending_claim_requests.insert(txid, req);
log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
broadcaster.broadcast_transaction(&tx);
}
}
Expand Down Expand Up @@ -536,11 +536,14 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
// been aggregated in a single tx and claimed so atomically
if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
for outpoint in request.outpoints() {
log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request);
self.claimable_outpoints.remove(&outpoint);
}
}
},
OnchainEvent::ContentiousOutpoint { package } => {
log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
log_debug!(logger, " {:?}", package.outpoints());
self.claimable_outpoints.remove(&package.outpoints()[0]);
}
}
Expand All @@ -562,7 +565,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
for (first_claim_txid, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
broadcaster.broadcast_transaction(&bump_tx);
if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
request.set_timer(new_timer);
Expand Down
12 changes: 6 additions & 6 deletions lightning/src/chain/package.rs
Original file line number Diff line number Diff line change
Expand Up @@ -603,18 +603,18 @@ impl PackageTemplate {
});
}
for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
}
log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
return Some(bumped_tx);
},
PackageMalleability::Untractable => {
debug_assert_eq!(value, 0, "value is ignored for non-malleable packages, should be zero to ensure callsites are correct");
if let Some((outpoint, outp)) = self.inputs.first() {
if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
return Some(final_tx);
}
return None;
Expand Down Expand Up @@ -794,13 +794,13 @@ fn feerate_bump<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64,
// ...else just increase the previous feerate by 25% (because that's a nice number)
let new_fee = previous_feerate * (predicted_weight as u64) / 750;
if input_amounts <= new_fee {
log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
return None;
}
new_fee
}
} else {
log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
return None;
};

Expand Down
Loading