Remove Channel's ChannelMonitor copy #667

Merged
6 changes: 3 additions & 3 deletions fuzz/src/chanmon_consistency.rs
@@ -34,7 +34,7 @@ use lightning::chain::transaction::OutPoint;
 use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil,ChainWatchInterface};
 use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
 use lightning::ln::channelmonitor;
-use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
+use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
 use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs};
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
@@ -135,8 +135,8 @@ impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
 		self.update_ret.lock().unwrap().clone()
 	}

-	fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
-		return self.simple_monitor.get_and_clear_pending_htlcs_updated();
+	fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+		return self.simple_monitor.get_and_clear_pending_monitor_events();
 	}
 }
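The renamed trait method returns generalized MonitorEvent values instead of bare HTLCUpdates. For orientation, a minimal sketch of the enum's shape, inferred from the match arms in the channelmanager.rs diff below — the authoritative definition lives in lightning/src/ln/channelmonitor.rs and is not shown in this diff:

// Sketch only: shape inferred from this PR's usage, not copied from the source.
pub enum MonitorEvent {
	/// An HTLC was resolved on-chain and must be claimed or failed backwards.
	HTLCEvent(HTLCUpdate),
	/// The monitor broadcast a commitment transaction for the channel with this funding outpoint.
	CommitmentTxBroadcasted(OutPoint),
}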
35 changes: 0 additions & 35 deletions lightning/src/ln/channel.rs
@@ -386,9 +386,6 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {

 	their_shutdown_scriptpubkey: Option<Script>,

-	/// Used exclusively to broadcast the latest local state, mostly a historical quirk that this
-	/// is here:
-	channel_monitor: Option<ChannelMonitor<ChanSigner>>,
 	commitment_secrets: CounterpartyCommitmentSecrets,

 	network_sync: UpdateStatus,
@@ -557,7 +554,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {

 			their_shutdown_scriptpubkey: None,

-			channel_monitor: None,
 			commitment_secrets: CounterpartyCommitmentSecrets::new(),

 			network_sync: UpdateStatus::Fresh,
@@ -786,7 +782,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {

 			their_shutdown_scriptpubkey,

-			channel_monitor: None,
 			commitment_secrets: CounterpartyCommitmentSecrets::new(),

 			network_sync: UpdateStatus::Fresh,
@@ -1222,7 +1217,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 				payment_preimage: payment_preimage_arg.clone(),
 			}],
 		};
-		self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();

 		if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
 			for pending_update in self.holding_cell_htlc_updates.iter() {
@@ -1552,7 +1546,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 			} }
 		}

-		self.channel_monitor = Some(create_monitor!());
 		let channel_monitor = create_monitor!();

 		self.channel_state = ChannelState::FundingSent as u32;
@@ -1618,7 +1611,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 			} }
 		}

-		self.channel_monitor = Some(create_monitor!());
 		let channel_monitor = create_monitor!();

 		assert_eq!(self.channel_state & (ChannelState::MonitorUpdateFailed as u32), 0); // We have no had any monitor(s) yet to fail update!
@@ -2060,7 +2052,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 				htlc_outputs: htlcs_and_sigs
 			}]
 		};
-		self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();

 		for htlc in self.pending_inbound_htlcs.iter_mut() {
 			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
@@ -2280,7 +2271,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 				secret: msg.per_commitment_secret,
 			}],
 		};
-		self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();

 		// Update state now that we've passed all the can-fail calls...
 		// (note that we may still fail to generate the new commitment_signed message, but that's
@@ -3115,14 +3105,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 		self.user_id
 	}

-	/// May only be called after funding has been initiated (ie is_funding_initiated() is true)
-	pub fn channel_monitor(&mut self) -> &mut ChannelMonitor<ChanSigner> {
-		if self.channel_state < ChannelState::FundingSent as u32 {
-			panic!("Can't get a channel monitor until funding has been created");
-		}
-		self.channel_monitor.as_mut().unwrap()
-	}
-
 	/// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
 	/// is_usable() returns true).
 	/// Allowed in any state (including after shutdown)
@@ -3397,9 +3379,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 		if header.bitcoin_hash() != self.last_block_connected {
 			self.last_block_connected = header.bitcoin_hash();
 			self.update_time_counter = cmp::max(self.update_time_counter, header.time);
-			if let Some(channel_monitor) = self.channel_monitor.as_mut() {
-				channel_monitor.last_block_hash = self.last_block_connected;
-			}
 			if self.funding_tx_confirmations > 0 {
 				if self.funding_tx_confirmations == self.minimum_depth as u64 {
 					let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
@@ -3458,9 +3437,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 			self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
 		}
 		self.last_block_connected = header.bitcoin_hash();
-		if let Some(channel_monitor) = self.channel_monitor.as_mut() {
-			channel_monitor.last_block_hash = self.last_block_connected;
-		}
 		false
 	}

@@ -3871,7 +3847,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 				their_revocation_point: self.their_cur_commitment_point.unwrap()
 			}]
 		};
-		self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();
 		self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
 		Ok((res, monitor_update))
 	}
@@ -4242,8 +4217,6 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
 		self.their_shutdown_scriptpubkey.write(writer)?;

 		self.commitment_secrets.write(writer)?;
-
-		self.channel_monitor.as_ref().unwrap().write_for_disk(writer)?;
 		Ok(())
 	}
 }
@@ -4398,13 +4371,6 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for Channel<ChanSigner> {
 		let their_shutdown_scriptpubkey = Readable::read(reader)?;
 		let commitment_secrets = Readable::read(reader)?;

-		let (monitor_last_block, channel_monitor) = Readable::read(reader)?;
-		// We drop the ChannelMonitor's last block connected hash cause we don't actually bother
-		// doing full block connection operations on the internal ChannelMonitor copies
-		if monitor_last_block != last_block_connected {
-			return Err(DecodeError::InvalidValue);
-		}
-
 		Ok(Channel {
 			user_id,

@@ -4476,7 +4442,6 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for Channel<ChanSigner> {

 			their_shutdown_scriptpubkey,

-			channel_monitor: Some(channel_monitor),
 			commitment_secrets,

 			network_sync: UpdateStatus::Fresh,
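Taken together, these channel.rs deletions mean Channel no longer mirrors monitor state: the update_monitor_ooo() calls are gone, Channel stops serializing an embedded monitor, and each state-changing method simply returns the ChannelMonitorUpdate it built. A rough sketch of the resulting flow on the ChannelManager side, assuming the era's ManyChannelMonitor::update_monitor(funding_txo, update) signature (the real call sites wrap the error in the manager's monitor-error handling rather than a bare helper like this):

// Illustrative helper, not code from the PR: the single authoritative
// ChannelMonitor lives behind the ManyChannelMonitor, so the manager forwards
// every ChannelMonitorUpdate a Channel method returns to that one copy.
fn apply_channel_update<ChanSigner: ChannelKeys, M: Deref>(monitor: &M, chan: &Channel<ChanSigner>, update: ChannelMonitorUpdate)
	-> Result<(), ChannelMonitorUpdateErr> where M::Target: ManyChannelMonitor<ChanSigner> {
	// On Err, the channel is paused (ChannelMonitorUpdateErr handling) instead
	// of Channel's private copy silently diverging from the persisted monitor.
	monitor.update_monitor(chan.get_funding_txo().unwrap(), update)
}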
95 changes: 49 additions & 46 deletions lightning/src/ln/channelmanager.rs
@@ -38,7 +38,7 @@ use bitcoin::secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -2966,6 +2966,48 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
 		}
 	}
+
+	/// Process pending events from the ManyChannelMonitor.
+	fn process_pending_monitor_events(&self) {
+		let mut failed_channels = Vec::new();
+		{
+			for monitor_event in self.monitor.get_and_clear_pending_monitor_events() {
+				match monitor_event {
+					MonitorEvent::HTLCEvent(htlc_update) => {
+						if let Some(preimage) = htlc_update.payment_preimage {
+							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+							self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+						} else {
+							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+							self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+						}
+					},
+					MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+						let mut channel_lock = self.channel_state.lock().unwrap();
+						let channel_state = &mut *channel_lock;
+						let by_id = &mut channel_state.by_id;
+						let short_to_id = &mut channel_state.short_to_id;
+						let pending_msg_events = &mut channel_state.pending_msg_events;
+						if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+							if let Some(short_id) = chan.get_short_channel_id() {
+								short_to_id.remove(&short_id);
+							}
+							failed_channels.push(chan.force_shutdown(false));
+							if let Ok(update) = self.get_channel_update(&chan) {
+								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+									msg: update
+								});
+							}
+						}
+					},
+				}
+			}
+		}
+
+		for failure in failed_channels.drain(..) {
+			self.finish_force_close_channel(failure);
+		}
+	}
 }

 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
@@ -2976,21 +3018,9 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	L::Target: Logger,
 {
 	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
-		// user to serialize a ChannelManager with pending events in it and lose those events on
-		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
-		{
-			//TODO: This behavior should be documented.
-			for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-				if let Some(preimage) = htlc_update.payment_preimage {
-					log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-				} else {
-					log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-				}
-			}
-		}
+		//TODO: This behavior should be documented. It's non-intuitive that we query
+		// ChannelMonitors when clearing other events.
+		self.process_pending_monitor_events();

 		let mut ret = Vec::new();
 		let mut channel_state = self.channel_state.lock().unwrap();
@@ -3007,21 +3037,9 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	L::Target: Logger,
 {
 	fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
-		// user to serialize a ChannelManager with pending events in it and lose those events on
-		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
-		{
-			//TODO: This behavior should be documented.
-			for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-				if let Some(preimage) = htlc_update.payment_preimage {
-					log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-				} else {
-					log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-				}
-			}
-		}
+		//TODO: This behavior should be documented. It's non-intuitive that we query
+		// ChannelMonitors when clearing other events.
+		self.process_pending_monitor_events();

 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
@@ -3104,21 +3122,6 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 					}
 				}
 			}
-			if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height, &self.logger) {
-				if let Some(short_id) = channel.get_short_channel_id() {
-					short_to_id.remove(&short_id);
-				}
-				// If would_broadcast_at_height() is true, the channel_monitor will broadcast
-				// the latest local tx for us, so we should skip that here (it doesn't really
-				// hurt anything, but does make tests a bit simpler).
-				failed_channels.push(channel.force_shutdown(false));
-				if let Ok(update) = self.get_channel_update(&channel) {
-					pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-						msg: update
-					});
-				}
-				return false;
-			}
 			true
 		});
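The final hunk is the behavioral crux of the PR: ChannelManager::block_connected no longer polls its (now removed) per-Channel monitor copy via would_broadcast_at_height(). Instead, the live ChannelMonitor reports its own broadcast as a MonitorEvent::CommitmentTxBroadcasted, which the new process_pending_monitor_events() turns into a force-shutdown. A sketch of the monitor side under that assumption — the field and method names here are illustrative, not copied from the PR:

// Illustrative sketch of the ChannelMonitor side; names are assumptions.
fn block_connected(&mut self /* ... */, height: u32) {
	if self.would_broadcast_at_height(height, &*self.logger) {
		// The monitor broadcasts the latest local commitment transaction itself,
		// then queues an event so the ChannelManager closes its Channel too.
		self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_txo));
	}
}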