diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 425b7f14114..9a05f5a4aae 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -302,6 +302,85 @@ const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisc pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; +pub const OUR_MAX_HTLCS: u16 = 50; //TODO + +pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { + const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; + const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124; + if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT } +} + +#[cfg(not(test))] +const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; +#[cfg(test)] +pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; + +pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; + +/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to, +/// before this was made configurable. The percentage was made configurable in LDK 0.0.107, +/// although LDK 0.0.104+ enabled serialization of channels with a different value set for +/// `holder_max_htlc_value_in_flight_msat`. +pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10; + +/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if +/// `option_support_large_channel` (aka wumbo channels) is not supported. +/// It's 2^24 - 1. +pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; + +/// Total bitcoin supply in satoshis. +pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000; + +/// The maximum network dust limit for standard script formats. This currently represents the +/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire +/// transaction non-standard and thus refuses to relay it. +/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many +/// implementations use this value for their dust limit today. +pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546; + +/// The maximum channel dust limit we will accept from our counterparty. +pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS; + +/// The dust limit is used for both the commitment transaction outputs as well as the closing +/// transactions. For cooperative closing transactions, we require segwit outputs, though accept +/// *any* segwit scripts, which are allowed to be up to 42 bytes in length. +/// In order to avoid having to concern ourselves with standardness during the closing process, we +/// simply require our counterparty to use a dust limit which will leave any segwit output +/// standard. +/// See for more details. +pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354; + +// Just a reasonable implementation-specific safe lower bound, higher than the dust limit. +pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000; + +/// Used to return a simple Error back to ChannelManager. Will get converted to a +/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our +/// channel_id in ChannelManager. +pub(super) enum ChannelError { + Ignore(String), + Warn(String), + Close(String), +} + +impl fmt::Debug for ChannelError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), + &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), + &ChannelError::Close(ref e) => write!(f, "Close : {}", e), + } + } +} + +macro_rules! 
secp_check { + ($res: expr, $err: expr) => { + match $res { + Ok(thing) => thing, + Err(_) => return Err(ChannelError::Close($err)), + } + }; +} + /// The "channel disabled" bit in channel_update must be set based on whether we are connected to /// our counterparty or not. However, we don't want to announce updates right away to avoid /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to @@ -346,7 +425,7 @@ enum HTLCInitiator { } /// An enum gathering stats on pending HTLCs, either inbound or outbound side. -struct HTLCStats { +pub(crate) struct HTLCStats { pending_htlcs: u32, pending_htlcs_value_msat: u64, on_counterparty_tx_dust_exposure_msat: u64, @@ -356,7 +435,7 @@ struct HTLCStats { } /// An enum gathering stats on commitment transaction, either local or remote. -struct CommitmentStats<'a> { +pub(crate) struct CommitmentStats<'a> { tx: CommitmentTransaction, // the transaction info feerate_per_kw: u32, // the feerate included to build the transaction total_fee_sat: u64, // the total fee included in the transaction @@ -368,7 +447,7 @@ struct CommitmentStats<'a> { } /// Used when calculating whether we or the remote can afford an additional HTLC. -struct HTLCCandidate { +pub(crate) struct HTLCCandidate { amount_msat: u64, origin: HTLCInitiator, } @@ -759,6 +838,10 @@ pub(super) trait ChannelInterface<'a, Signer: WriteableEcdsaChannelSigner + 'a> self.get_context().user_id } + fn opt_anchors(&'a self) -> bool { + self.get_context().channel_transaction_parameters.opt_anchors.is_some() + } + /// Gets the channel's type fn get_channel_type(&'a self) -> &ChannelTypeFeatures { &self.get_context().channel_type @@ -968,470 +1051,799 @@ pub(super) trait ChannelInterface<'a, Signer: WriteableEcdsaChannelSigner + 'a> self.get_context().channel_transaction_parameters.is_outbound_from_holder } - /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the - /// `channel_value_satoshis` in msat, set through - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`] - /// - /// The effective percentage is lower bounded by 1% and upper bounded by 100%. - /// - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel - fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 { - let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 { - 1 - } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 { - 100 - } else { - config.max_inbound_htlc_value_in_flight_percent_of_channel as u64 - }; - channel_value_satoshis * 10 * configured_percent + fn counterparty_funding_pubkey(&'a self) -> &PublicKey { + &self.get_counterparty_pubkeys().funding_pubkey } - // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. - // Note that num_htlcs should not include dust HTLCs. - fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { - // Note that we need to divide before multiplying to round properly, - // since the lowest denomination of bitcoin on-chain is the satoshi. 
- (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 + /// Gets the redeemscript for the funding transaction output (ie the funding transaction output + /// pays to get_funding_redeemscript().to_v0_p2wsh()). + /// Panics if called before accept_channel/new_from_req + fn get_funding_redeemscript(&'a self) -> Script { + make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) } - // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs. - // Note that num_htlcs should not include dust HTLCs. #[inline] - fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { - feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 - } -} + /// Creates a set of keys for build_commitment_transaction to generate a transaction which our + /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to + /// our counterparty!) + /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) + /// TODO Some magic rust shit to compile-time check this? + fn build_holder_transaction_keys(&'a self, commitment_number: u64) -> TxCreationKeys { + let context = self.get_context(); + let per_commitment_point = context.holder_signer.get_per_commitment_point(commitment_number, &context.secp_ctx); + let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; + let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.get_counterparty_pubkeys(); -impl<'a, Signer: WriteableEcdsaChannelSigner + 'a> ChannelInterface<'a, Signer> for FundedChannel { - fn get_context(&'a self) -> &'a ChannelContext { - &self.context + TxCreationKeys::derive_new(&context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) } - fn get_context_mut(&'a mut self) -> &'a mut ChannelContext { - &mut self.context - } -} + #[inline] + /// Creates a set of keys for build_commitment_transaction to generate a transaction which we + /// will sign and send to our counterparty. + /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) + fn build_remote_transaction_keys(&'a self) -> TxCreationKeys { + //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we + //may see payments to it! + let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; + let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; + let counterparty_pubkeys = self.get_counterparty_pubkeys(); -// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking -// has been completed, and then turn into a Channel to get compiler-time enforcement of things like -// calling channel_id() before we're set up or things like get_outbound_funding_signed on an -// inbound channel. -// -// Holder designates channel data owned for the benefice of the user client. -// Counterparty designates channel data owned by the another channel participant entity. 
-pub(super) struct FundedChannel { - #[cfg(not(test))] - context: ChannelContext, - #[cfg(test)] - pub context: ChannelContext, -} + let context = self.get_context(); + TxCreationKeys::derive_new(&context.secp_ctx, &context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) + } -#[cfg(any(test, fuzzing))] -struct CommitmentTxInfoCached { - fee: u64, - total_pending_htlcs: usize, - next_holder_htlc_id: u64, - next_counterparty_htlc_id: u64, - feerate: u32, -} + /// Transaction nomenclature is somewhat confusing here as there are many different cases - a + /// transaction is referred to as "a's transaction" implying that a will be able to broadcast + /// the transaction. Thus, b will generally be sending a signature over such a transaction to + /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As + /// such, a transaction is generally the result of b increasing the amount paid to a (or adding + /// an HTLC to a). + /// @local is used only to convert relevant internal structures which refer to remote vs local + /// to decide value of outputs and direction of HTLCs. + /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC + /// state may indicate that one peer has informed the other that they'd like to add an HTLC but + /// have not yet committed it. Such HTLCs will only be included in transactions which are being + /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both + /// which peer generated this transaction and "to whom" this transaction flows. + #[inline] + fn build_commitment_transaction(&'a self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats + where L::Target: Logger + { + let context = self.get_context(); + let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); + let num_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); + let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); -pub const OUR_MAX_HTLCS: u16 = 50; //TODO + let broadcaster_dust_limit_satoshis = if local { context.holder_dust_limit_satoshis } else { context.counterparty_dust_limit_satoshis }; + let mut remote_htlc_total_msat = 0; + let mut local_htlc_total_msat = 0; + let mut value_to_self_msat_offset = 0; -pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { - const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; - const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124; - if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT } -} + let mut feerate_per_kw = context.feerate_per_kw; + if let Some((feerate, update_state)) = context.pending_update_fee { + if match update_state { + // Note that these match the inclusion criteria when scanning + // pending_inbound_htlcs below. 
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, + } { + feerate_per_kw = feerate; + } + } -#[cfg(not(test))] -const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; -#[cfg(test)] -pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; + log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", + commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), + get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), + log_bytes!(context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); -pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; + macro_rules! get_htlc_in_commitment { + ($htlc: expr, $offered: expr) => { + HTLCOutputInCommitment { + offered: $offered, + amount_msat: $htlc.amount_msat, + cltv_expiry: $htlc.cltv_expiry, + payment_hash: $htlc.payment_hash, + transaction_output_index: None + } + } + } -/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to, -/// before this was made configurable. The percentage was made configurable in LDK 0.0.107, -/// although LDK 0.0.104+ enabled serialization of channels with a different value set for -/// `holder_max_htlc_value_in_flight_msat`. -pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10; + macro_rules! add_htlc_output { + ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { + if $outbound == local { // "offered HTLC output" + let htlc_in_tx = get_htlc_in_commitment!($htlc, true); + let htlc_tx_fee = if self.opt_anchors() { + 0 + } else { + feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000 + }; + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_non_dust_htlcs.push((htlc_in_tx, $source)); + } else { + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_dust_htlcs.push((htlc_in_tx, $source)); + } + } else { + let htlc_in_tx = get_htlc_in_commitment!($htlc, false); + let htlc_tx_fee = if self.opt_anchors() { + 0 + } else { + feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 + }; + if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_non_dust_htlcs.push((htlc_in_tx, $source)); + } else { + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + included_dust_htlcs.push((htlc_in_tx, $source)); + } + } + } + } -/// Maximum `funding_satoshis` value according to the BOLT #2 
specification, if -/// `option_support_large_channel` (aka wumbo channels) is not supported. -/// It's 2^24 - 1. -pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; - -/// Total bitcoin supply in satoshis. -pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000; + for ref htlc in context.pending_inbound_htlcs.iter() { + let (include, state_name) = match htlc.state { + InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"), + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"), + InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"), + InboundHTLCState::Committed => (true, "Committed"), + InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"), + }; -/// The maximum network dust limit for standard script formats. This currently represents the -/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire -/// transaction non-standard and thus refuses to relay it. -/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many -/// implementations use this value for their dust limit today. -pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546; + if include { + add_htlc_output!(htlc, false, None, state_name); + remote_htlc_total_msat += htlc.amount_msat; + } else { + log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + match &htlc.state { + &InboundHTLCState::LocalRemoved(ref reason) => { + if generated_by_local { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + value_to_self_msat_offset += htlc.amount_msat as i64; + } + } + }, + _ => {}, + } + } + } -/// The maximum channel dust limit we will accept from our counterparty. -pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS; + let mut preimages: Vec = Vec::new(); -/// The dust limit is used for both the commitment transaction outputs as well as the closing -/// transactions. For cooperative closing transactions, we require segwit outputs, though accept -/// *any* segwit scripts, which are allowed to be up to 42 bytes in length. -/// In order to avoid having to concern ourselves with standardness during the closing process, we -/// simply require our counterparty to use a dust limit which will leave any segwit output -/// standard. -/// See for more details. -pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354; + for ref htlc in context.pending_outbound_htlcs.iter() { + let (include, state_name) = match htlc.state { + OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"), + OutboundHTLCState::Committed => (true, "Committed"), + OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"), + OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"), + OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"), + }; -// Just a reasonable implementation-specific safe lower bound, higher than the dust limit. 
-pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000; + let preimage_opt = match htlc.state { + OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p, + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p, + _ => None, + }; -/// Used to return a simple Error back to ChannelManager. Will get converted to a -/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our -/// channel_id in ChannelManager. -pub(super) enum ChannelError { - Ignore(String), - Warn(String), - Close(String), -} + if let Some(preimage) = preimage_opt { + preimages.push(preimage); + } -impl fmt::Debug for ChannelError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e), - &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e), - &ChannelError::Close(ref e) => write!(f, "Close : {}", e), + if include { + add_htlc_output!(htlc, true, Some(&htlc.source), state_name); + local_htlc_total_msat += htlc.amount_msat; + } else { + log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + match htlc.state { + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { + value_to_self_msat_offset -= htlc.amount_msat as i64; + }, + OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => { + if !generated_by_local { + value_to_self_msat_offset -= htlc.amount_msat as i64; + } + }, + _ => {}, + } + } } - } -} -macro_rules! secp_check { - ($res: expr, $err: expr) => { - match $res { - Ok(thing) => thing, - Err(_) => return Err(ChannelError::Close($err)), + let mut value_to_self_msat: i64 = (context.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; + assert!(value_to_self_msat >= 0); + // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie + // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to + // "violate" their reserve value by couting those against it. Thus, we have to convert + // everything to i64 before subtracting as otherwise we can overflow. + let mut value_to_remote_msat: i64 = (context.channel_value_satoshis * 1000) as i64 - (context.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; + assert!(value_to_remote_msat >= 0); + + #[cfg(debug_assertions)] + { + // Make sure that the to_self/to_remote is always either past the appropriate + // channel_reserve *or* it is making progress towards it. 
+ let mut broadcaster_max_commitment_tx_output = if generated_by_local { + context.holder_max_commitment_tx_output.lock().unwrap() + } else { + context.counterparty_max_commitment_tx_output.lock().unwrap() + }; + debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= context.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); + broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); + debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= context.holder_selected_channel_reserve_satoshis as i64); + broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); } - }; -} -impl FundedChannel { - /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the - /// `channel_value_satoshis` in msat, set through - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`] - /// - /// The effective percentage is lower bounded by 1% and upper bounded by 100%. - /// - /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel - fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 { - let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 { - 1 - } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 { - 100 + let total_fee_sat = FundedChannel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), context.channel_transaction_parameters.opt_anchors.is_some()); + let anchors_val = if context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; + let (value_to_self, value_to_remote) = if self.is_outbound() { + (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) } else { - config.max_inbound_htlc_value_in_flight_percent_of_channel as u64 + (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) }; - channel_value_satoshis * 10 * configured_percent - } - /// Returns a minimum channel reserve value the remote needs to maintain, - /// required by us according to the configured or default - /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`] - /// - /// Guaranteed to return a value no larger than channel_value_satoshis - /// - /// This is used both for outbound and inbound channels and has lower bound - /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`. 
- pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 { - let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000; - cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS)) - } + let mut value_to_a = if local { value_to_self } else { value_to_remote }; + let mut value_to_b = if local { value_to_remote } else { value_to_self }; + let (funding_pubkey_a, funding_pubkey_b) = if local { + (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey) + } else { + (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey) + }; - /// This is for legacy reasons, present for forward-compatibility. - /// LDK versions older than 0.0.104 don't know how read/handle values other than default - /// from storage. Hence, we use this function to not persist default values of - /// `holder_selected_channel_reserve_satoshis` for channels into storage. - pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 { - let (q, _) = channel_value_satoshis.overflowing_div(100); - cmp::min(channel_value_satoshis, cmp::max(q, 1000)) + if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { + log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); + } else { + value_to_a = 0; + } + + if value_to_b >= (broadcaster_dust_limit_satoshis as i64) { + log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); + } else { + value_to_b = 0; + } + + let num_nondust_htlcs = included_non_dust_htlcs.len(); + + let channel_parameters = + if local { context.channel_transaction_parameters.as_holder_broadcastable() } + else { context.channel_transaction_parameters.as_counterparty_broadcastable() }; + let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, + value_to_a as u64, + value_to_b as u64, + context.channel_transaction_parameters.opt_anchors.is_some(), + funding_pubkey_a, + funding_pubkey_b, + keys.clone(), + feerate_per_kw, + &mut included_non_dust_htlcs, + &channel_parameters + ); + let mut htlcs_included = included_non_dust_htlcs; + // The unwrap is safe, because all non-dust HTLCs have been assigned an output index + htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); + htlcs_included.append(&mut included_dust_htlcs); + + // For the stats, trimmed-to-0 the value in msats accordingly + value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat }; + value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat }; + + CommitmentStats { + tx, + feerate_per_kw, + total_fee_sat, + num_nondust_htlcs, + htlcs_included, + local_balance_msat: value_to_self_msat as u64, + remote_balance_msat: value_to_remote_msat as u64, + preimages + } } - pub(crate) fn opt_anchors(&self) -> bool { - self.context.channel_transaction_parameters.opt_anchors.is_some() + /// Get forwarding information for the counterparty. 
+ fn counterparty_forwarding_info(&'a self) -> Option { + self.get_context().counterparty_forwarding_info.clone() } - fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { - // The default channel type (ie the first one we try) depends on whether the channel is - // public - if it is, we just go with `only_static_remotekey` as it's the only option - // available. If it's private, we first try `scid_privacy` as it provides better privacy - // with no other changes, and fall back to `only_static_remotekey`. - let mut ret = ChannelTypeFeatures::only_static_remote_key(); - if !config.channel_handshake_config.announced_channel && - config.channel_handshake_config.negotiate_scid_privacy && - their_features.supports_scid_privacy() { - ret.set_scid_privacy_required(); - } + /// Get the available balances, see [`AvailableBalances`]'s fields for more info. + /// Doesn't bother handling the + /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC + /// corner case properly. + fn get_available_balances(&'a self) -> AvailableBalances { + // Note that we have to handle overflow due to the above case. + let outbound_stats = self.get_outbound_pending_htlc_stats(None); - // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we - // set it now. If they don't understand it, we'll fall back to our default of - // `only_static_remotekey`. - #[cfg(anchors)] - { // Attributes are not allowed on if expressions on our current MSRV of 1.41. - if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx && - their_features.supports_anchors_zero_fee_htlc_tx() { - ret.set_anchors_zero_fee_htlc_tx_required(); + let context = self.get_context(); + let mut balance_msat = context.value_to_self_msat; + for ref htlc in context.pending_inbound_htlcs.iter() { + if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { + balance_msat += htlc.amount_msat; } } + balance_msat -= outbound_stats.pending_htlcs_value_msat; - ret + let outbound_capacity_msat = cmp::max(context.value_to_self_msat as i64 + - outbound_stats.pending_htlcs_value_msat as i64 + - context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, + 0) as u64; + AvailableBalances { + inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000 + - context.value_to_self_msat as i64 + - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64 + - context.holder_selected_channel_reserve_satoshis as i64 * 1000, + 0) as u64, + outbound_capacity_msat, + next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64, + context.counterparty_max_htlc_value_in_flight_msat as i64 + - outbound_stats.pending_htlcs_value_msat as i64), + 0) as u64, + balance_msat, + } } - /// If we receive an error message, it may only be a rejection of the channel type we tried, - /// not of our ability to open any channel at all. Thus, on error, we should first call this - /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. - pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result { - if !self.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); } - if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() { - // We've exhausted our options - return Err(()); - } - // We support opening a few different types of channels. 
Try removing our additional - // features one by one until we've either arrived at our default or the counterparty has - // accepted one. - // - // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the - // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type` - // checks whether the counterparty supports every feature, this would only happen if the - // counterparty is advertising the feature, but rejecting channels proposing the feature for - // whatever reason. - if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { - self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); - assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none()); - self.context.channel_transaction_parameters.opt_anchors = None; - } else if self.context.channel_type.supports_scid_privacy() { - self.context.channel_type.clear_scid_privacy(); + /// Returns a HTLCStats about inbound pending htlcs + fn get_inbound_pending_htlc_stats(&'a self, outbound_feerate_update: Option) -> HTLCStats { + let context = self.get_context(); + let mut stats = HTLCStats { + pending_htlcs: context.pending_inbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, + }; + + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() { + (0, 0) } else { - self.context.channel_type = ChannelTypeFeatures::only_static_remote_key(); + let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + }; + let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_inbound_htlcs.iter() { + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_success_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } - Ok(self.get_open_channel(chain_hash)) + stats } - // Constructors: - pub fn new_outbound( - fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, - channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, - outbound_scid_alias: u64 - ) -> Result, APIError> - where ES::Target: EntropySource, - SP::Target: SignerProvider, - F::Target: FeeEstimator, - { - let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; - let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); - let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id); - let pubkeys = holder_signer.pubkeys().clone(); + /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell. 
+ fn get_outbound_pending_htlc_stats(&'a self, outbound_feerate_update: Option) -> HTLCStats { + let context = self.get_context(); + let mut stats = HTLCStats { + pending_htlcs: context.pending_outbound_htlcs.len() as u32, + pending_htlcs_value_msat: 0, + on_counterparty_tx_dust_exposure_msat: 0, + on_holder_tx_dust_exposure_msat: 0, + holding_cell_msat: 0, + on_holder_tx_holding_cell_htlcs_count: 0, + }; - if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO { - return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)}); - } - if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS { - return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)}); - } - let channel_value_msat = channel_value_satoshis * 1000; - if push_msat > channel_value_msat { - return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) }); - } - if holder_selected_contest_delay < BREAKDOWN_TIMEOUT { - return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)}); - } - let holder_selected_channel_reserve_satoshis = FundedChannel::::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config); - if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - // Protocol level safety check in place, although it should never happen because - // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS` - return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) }); + let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() { + (0, 0) + } else { + let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64; + (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000, + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000) + }; + let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; + for ref htlc in context.pending_outbound_htlcs.iter() { + stats.pending_htlcs_value_msat += htlc.amount_msat; + if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat; + } + if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat; + } } - let channel_type = Self::get_initial_channel_type(&config, their_features); - debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - - let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - - let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; - let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); - if value_to_self_msat < commitment_tx_fee { - return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); + for update in 
context.holding_cell_htlc_updates.iter() { + if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update { + stats.pending_htlcs += 1; + stats.pending_htlcs_value_msat += amount_msat; + stats.holding_cell_msat += amount_msat; + if *amount_msat / 1000 < counterparty_dust_limit_success_sat { + stats.on_counterparty_tx_dust_exposure_msat += amount_msat; + } + if *amount_msat / 1000 < holder_dust_limit_timeout_sat { + stats.on_holder_tx_dust_exposure_msat += amount_msat; + } else { + stats.on_holder_tx_holding_cell_htlcs_count += 1; + } + } } + stats + } - let mut secp_ctx = Secp256k1::new(); - secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - - let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - Some(signer_provider.get_shutdown_scriptpubkey()) - } else { None }; + fn get_holder_counterparty_selected_channel_reserve_satoshis(&'a self) -> (u64, Option) { + (self.get_context().holder_selected_channel_reserve_satoshis, self.get_context().counterparty_selected_channel_reserve_satoshis) + } - if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { - if !shutdown_scriptpubkey.is_compatible(&their_features) { - return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); - } - } + /// Returns true if we've ever received a message from the remote end for this Channel + fn have_received_message(&'a self) -> bool { + self.get_context().channel_state > (ChannelState::OurInitSent as u32) + } - Ok(FundedChannel { - context: ChannelContext { - user_id, + /// Gets the latest commitment transaction and any dependent transactions for relay (forcing + /// shutdown of this channel - no more calls into this Channel may be made afterwards except + /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). + /// Also returns the list of payment_hashes for channels which we can safely fail backwards + /// immediately (others we will have to allow to time out). + fn force_shutdown(&'a mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) { + let context = self.get_context_mut(); + // Note that we MUST only generate a monitor update that indicates force-closure - we're + // called during initialization prior to the chain_monitor in the encompassing ChannelManager + // being fully configured in some cases. Thus, its likely any monitor events we generate will + // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. + assert!(context.channel_state != ChannelState::ShutdownComplete as u32); - config: LegacyChannelConfig { - options: config.channel_config.clone(), - announced_channel: config.channel_handshake_config.announced_channel, - commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and + // return them to fail the payment. + let mut dropped_outbound_htlcs = Vec::with_capacity(context.holding_cell_htlc_updates.len()); + let counterparty_node_id = context.counterparty_node_id; + for htlc_update in context.holding_cell_htlc_updates.drain(..) { + match htlc_update { + HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. 
} => { + dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, context.channel_id)); }, + _ => {} + } + } + let monitor_update = if let Some(funding_txo) = context.channel_transaction_parameters.funding_outpoint { + // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), + // returning a channel monitor update here would imply a channel monitor update before + // we even registered the channel monitor to begin with, which is invalid. + // Thus, if we aren't actually at a point where we could conceivably broadcast the + // funding transaction, don't return a funding txo (which prevents providing the + // monitor update to the user, even if we return one). + // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. + if context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { + context.latest_monitor_update_id += 1; + Some((funding_txo, ChannelMonitorUpdate { + update_id: context.latest_monitor_update_id, + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], + })) + } else { None } + } else { None }; - prev_config: None, + context.channel_state = ChannelState::ShutdownComplete as u32; + context.update_time_counter += 1; + (monitor_update, dropped_outbound_htlcs) + } - inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), + fn get_dust_buffer_feerate(&'a self, outbound_feerate_update: Option) -> u32 { + let context = self.get_context(); + // When calculating our exposure to dust HTLCs, we assume that the channel feerate + // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%, + // whichever is higher. This ensures that we aren't suddenly exposed to significantly + // more dust balance if the feerate increases when we have several HTLCs pending + // which are near the dust limit. + let mut feerate_per_kw = context.feerate_per_kw; + // If there's a pending update fee, use it to ensure we aren't under-estimating + // potential feerate updates coming soon. + if let Some((feerate, _)) = context.pending_update_fee { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } + if let Some(feerate) = outbound_feerate_update { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } + cmp::max(2530, feerate_per_kw * 1250 / 1000) + } - channel_id: entropy_source.get_secure_random_bytes(), - channel_state: ChannelState::OurInitSent as u32, - announcement_sigs_state: AnnouncementSigsState::NotSent, - secp_ctx, - channel_value_satoshis, + /// Returns true if funding_created was sent/received. + fn is_funding_initiated(&'a self) -> bool { + self.get_context().channel_state >= ChannelState::FundingSent as u32 + } - latest_monitor_update_id: 0, + // Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the + // number of pending HTLCs that are on track to be in our next commitment tx, plus an additional + // HTLC if `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs + // are excluded. 
+ fn next_local_commit_tx_fee_msat(&'a self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { + assert!(self.is_outbound()); - holder_signer, - shutdown_scriptpubkey, - destination_script: signer_provider.get_destination_script(), + let context = self.get_context(); - cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, - value_to_self_msat, + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + (0, 0) + } else { + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + }; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis; - pending_inbound_htlcs: Vec::new(), - pending_outbound_htlcs: Vec::new(), - holding_cell_htlc_updates: Vec::new(), - pending_update_fee: None, - holding_cell_update_fee: None, - next_holder_htlc_id: 0, - next_counterparty_htlc_id: 0, - update_time_counter: 1, + let mut addl_htlcs = 0; + if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } + match htlc.origin { + HTLCInitiator::LocalOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { + addl_htlcs += 1; + } + }, + HTLCInitiator::RemoteOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { + addl_htlcs += 1; + } + } + } - resend_order: RAACommitmentOrder::CommitmentFirst, + let mut included_htlcs = 0; + for ref htlc in context.pending_inbound_htlcs.iter() { + if htlc.amount_msat / 1000 < real_dust_limit_success_sat { + continue + } + // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment + // transaction including this HTLC if it times out before they RAA. + included_htlcs += 1; + } - monitor_pending_channel_ready: false, - monitor_pending_revoke_and_ack: false, - monitor_pending_commitment_signed: false, - monitor_pending_forwards: Vec::new(), - monitor_pending_failures: Vec::new(), - monitor_pending_finalized_fulfills: Vec::new(), + for ref htlc in context.pending_outbound_htlcs.iter() { + if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { + continue + } + match htlc.state { + OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1, + OutboundHTLCState::Committed => included_htlcs += 1, + OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, + // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment + // transaction won't be generated until they send us their next RAA, which will mean + // dropping any HTLCs in this state. + _ => {}, + } + } - #[cfg(debug_assertions)] - holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - #[cfg(debug_assertions)] - counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + for htlc in context.holding_cell_htlc_updates.iter() { + match htlc { + &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => { + if amount_msat / 1000 < real_dust_limit_timeout_sat { + continue + } + included_htlcs += 1 + }, + _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the + // ack we're guaranteed to never include them in commitment txs anymore. 
+ } + } - last_sent_closing_fee: None, - pending_counterparty_closing_signed: None, - closing_fee_limits: None, - target_closing_feerate_sats_per_kw: None, + let num_htlcs = included_htlcs + addl_htlcs; + let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, self.opt_anchors()); + #[cfg(any(test, fuzzing))] + { + let mut fee = res; + if fee_spike_buffer_htlc.is_some() { + fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + } + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len() + + context.holding_cell_htlc_updates.len(); + let commitment_tx_info = CommitmentTxInfoCached { + fee, + total_pending_htlcs, + next_holder_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, + }, + next_counterparty_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, + }, + feerate: context.feerate_per_kw, + }; + *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + } + res + } - inbound_awaiting_accept: false, + // Get the commitment tx fee for the remote's next commitment transaction based on the number of + // pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if + // `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs are + // excluded. + fn next_remote_commit_tx_fee_msat(&'a self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { + assert!(!self.is_outbound()); - funding_tx_confirmed_in: None, - funding_tx_confirmation_height: 0, - short_channel_id: None, - channel_creation_height: current_chain_height, + let context = self.get_context(); - feerate_per_kw: feerate, - counterparty_dust_limit_satoshis: 0, - holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, - counterparty_max_htlc_value_in_flight_msat: 0, - holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), - counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel - holder_selected_channel_reserve_satoshis, - counterparty_htlc_minimum_msat: 0, - holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, - counterparty_max_accepted_htlcs: 0, - minimum_depth: None, // Filled in in accept_channel + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + (0, 0) + } else { + (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, + context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) + }; + let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis; + let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis; - counterparty_forwarding_info: None, + let mut addl_htlcs = 0; + if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } + match htlc.origin { + HTLCInitiator::LocalOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { + addl_htlcs += 1; + } + }, + HTLCInitiator::RemoteOffered => { + if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { + addl_htlcs += 1; + } + } + } - channel_transaction_parameters: 
ChannelTransactionParameters { - holder_pubkeys: pubkeys, - holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, - is_outbound_from_holder: true, - counterparty_parameters: None, - funding_outpoint: None, - opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, - opt_non_zero_fee_anchors: None + // When calculating the set of HTLCs which will be included in their next commitment_signed, all + // non-dust inbound HTLCs are included (as all states imply it will be included) and only + // committed outbound HTLCs, see below. + let mut included_htlcs = 0; + for ref htlc in context.pending_inbound_htlcs.iter() { + if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { + continue + } + included_htlcs += 1; + } + + for ref htlc in context.pending_outbound_htlcs.iter() { + if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { + continue + } + // We only include outbound HTLCs if it will not be included in their next commitment_signed, + // i.e. if they've responded to us with an RAA after announcement. + match htlc.state { + OutboundHTLCState::Committed => included_htlcs += 1, + OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, + OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1, + _ => {}, + } + } + + let num_htlcs = included_htlcs + addl_htlcs; + let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, self.opt_anchors()); + #[cfg(any(test, fuzzing))] + { + let mut fee = res; + if fee_spike_buffer_htlc.is_some() { + fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); + } + let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len(); + let commitment_tx_info = CommitmentTxInfoCached { + fee, + total_pending_htlcs, + next_holder_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1, + HTLCInitiator::RemoteOffered => context.next_holder_htlc_id, }, - funding_transaction: None, + next_counterparty_htlc_id: match htlc.origin { + HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id, + HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1, + }, + feerate: context.feerate_per_kw, + }; + *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); + } + res + } - counterparty_cur_commitment_point: None, - counterparty_prev_commitment_point: None, - counterparty_node_id, + /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the + /// `channel_value_satoshis` in msat, set through + /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`] + /// + /// The effective percentage is lower bounded by 1% and upper bounded by 100%. 
+ /// + /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel + fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 { + let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 { + 1 + } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 { + 100 + } else { + config.max_inbound_htlc_value_in_flight_percent_of_channel as u64 + }; + channel_value_satoshis * 10 * configured_percent + } - counterparty_shutdown_scriptpubkey: None, + // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. + // Note that num_htlcs should not include dust HTLCs. + fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { + // Note that we need to divide before multiplying to round properly, + // since the lowest denomination of bitcoin on-chain is the satoshi. + (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 + } - commitment_secrets: CounterpartyCommitmentSecrets::new(), + // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs. + // Note that num_htlcs should not include dust HTLCs. + #[inline] + fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 { + feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 + } - channel_update_status: ChannelUpdateStatus::Enabled, - closing_signed_in_flight: false, + /// Returns a minimum channel reserve value the remote needs to maintain, + /// required by us according to the configured or default + /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`] + /// + /// Guaranteed to return a value no larger than channel_value_satoshis + /// + /// This is used both for outbound and inbound channels and has lower bound + /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`. + fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 { + let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000; + cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS)) + } - announcement_sigs: None, + /// This is for legacy reasons, present for forward-compatibility. + /// LDK versions older than 0.0.104 don't know how read/handle values other than default + /// from storage. Hence, we use this function to not persist default values of + /// `holder_selected_channel_reserve_satoshis` for channels into storage. 
+	/// This is for legacy reasons, present for forward-compatibility.
+	/// LDK versions older than 0.0.104 don't know how to read/handle values other than default
+	/// from storage. Hence, we use this function to not persist default values of
+	/// `holder_selected_channel_reserve_satoshis` for channels into storage.
+	fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+		let (q, _) = channel_value_satoshis.overflowing_div(100);
+		cmp::min(channel_value_satoshis, cmp::max(q, 1000))
+	}
+}

-		#[cfg(any(test, fuzzing))]
-		next_local_commitment_tx_fee_info_cached: Mutex::new(None),
-		#[cfg(any(test, fuzzing))]
-		next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

+impl<'a, Signer: WriteableEcdsaChannelSigner + 'a> ChannelInterface<'a, Signer> for Channel<Signer> {
+	fn get_context(&'a self) -> &'a ChannelContext<Signer> {
+		match self {
+			Channel::Funded(chan) => chan.get_context(),
+			Channel::InboundV1(chan) => chan.get_context(),
+			Channel::OutboundV1(chan) => chan.get_context(),
+		}
+	}

-		workaround_lnd_bug_4006: None,

+	fn get_context_mut(&'a mut self) -> &'a mut ChannelContext<Signer> {
+		match self {
+			Channel::Funded(chan) => chan.get_context_mut(),
+			Channel::InboundV1(chan) => chan.get_context_mut(),
+			Channel::OutboundV1(chan) => chan.get_context_mut()
+		}
+	}
+}

-		latest_inbound_scid_alias: None,
-		outbound_scid_alias,

+impl<'a, Signer: WriteableEcdsaChannelSigner + 'a> ChannelInterface<'a, Signer> for FundedChannel<Signer> {
+	fn get_context(&'a self) -> &'a ChannelContext<Signer> {
+		&self.context
+	}

-		channel_ready_event_emitted: false,

+	fn get_context_mut(&'a mut self) -> &'a mut ChannelContext<Signer> {
+		&mut self.context
+	}
+}

-		#[cfg(any(test, fuzzing))]
-		historical_inbound_htlc_fulfills: HashSet::new(),

+impl<'a, Signer: WriteableEcdsaChannelSigner + 'a> ChannelInterface<'a, Signer> for InboundV1Channel<Signer> {
+	fn get_context(&'a self) -> &'a ChannelContext<Signer> {
+		&self.context
+	}

-		channel_type,
-		channel_keys_id,

+	fn get_context_mut(&'a mut self) -> &'a mut ChannelContext<Signer> {
+		&mut self.context
+	}
+}

-		pending_monitor_updates: Vec::new(),
-		}
-	})

+impl<'a, Signer: WriteableEcdsaChannelSigner + 'a> ChannelInterface<'a, Signer> for OutboundV1Channel<Signer> {
+	fn get_context(&'a self) -> &'a ChannelContext<Signer> {
+		&self.context
+	}
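These accessor impls all funnel to a single `ChannelContext`, which is what lets the `ChannelInterface` default methods be written once and work for every channel phase. A toy model of the pattern follows; `Ctx`, `Phase`, and `HasCtx` are hypothetical stand-ins, not types from this file:

```rust
// Toy sketch of the context-accessor pattern: one trait accessor, shared
// default methods, and an enum that fans out to the per-phase types.
struct Ctx { user_id: u128 }

trait HasCtx {
    fn ctx(&self) -> &Ctx;
    // Default methods are written once against the accessor:
    fn user_id(&self) -> u128 { self.ctx().user_id }
}

struct Unfunded { ctx: Ctx }
struct Funded { ctx: Ctx }

impl HasCtx for Unfunded { fn ctx(&self) -> &Ctx { &self.ctx } }
impl HasCtx for Funded { fn ctx(&self) -> &Ctx { &self.ctx } }

enum Phase { Unfunded(Unfunded), Funded(Funded) }

impl HasCtx for Phase {
    fn ctx(&self) -> &Ctx {
        match self {
            Phase::Unfunded(c) => c.ctx(),
            Phase::Funded(c) => c.ctx(),
        }
    }
}

fn main() {
    let p = Phase::Funded(Funded { ctx: Ctx { user_id: 42 } });
    assert_eq!(p.user_id(), 42); // one implementation, any phase
}
```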
-	fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
-		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
-		-> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
-	{
-		// We only bound the fee updates on the upper side to prevent completely absurd feerates,
-		// always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
-		// We generally don't care too much if they set the feerate to something very high, but it
-		// could result in the channel being useless due to everything being dust.
-		let upper_limit = cmp::max(250 * 25,
-			fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
-		if feerate_per_kw as u64 > upper_limit {
-			return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
-		}
-		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
-		// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
-		// occasional issues with feerate disagreements between an initiator that wants a feerate
-		// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
-		// sat/kw before the comparison here.
-		if feerate_per_kw + 250 < lower_limit {
-			if let Some(cur_feerate) = cur_feerate_per_kw {
-				if feerate_per_kw > cur_feerate {
-					log_warn!(logger,
-						"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
-						cur_feerate, feerate_per_kw);
-					return Ok(());
-				}
-			}
-			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
-		}
-		Ok(())
-	}

+	fn get_context_mut(&'a mut self) -> &'a mut ChannelContext<Signer> {
+		&mut self.context
+	}
+}

+pub(super) enum Channel<Signer: WriteableEcdsaChannelSigner> {
+	Funded(FundedChannel<Signer>),
+	InboundV1(InboundV1Channel<Signer>),
+	OutboundV1(OutboundV1Channel<Signer>),
+}

+// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
+pub(super) struct InboundV1Channel<Signer: WriteableEcdsaChannelSigner> {
+	context: ChannelContext<Signer>,
+}

+impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
	pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
@@ -1439,7 +1851,7 @@
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, outbound_scid_alias: u64
-	) -> Result<FundedChannel<Signer>, ChannelError>
+	) -> Result<InboundV1Channel<Signer>, ChannelError>
		where ES::Target: EntropySource,
			SP::Target: SignerProvider,
			F::Target: FeeEstimator,
@@ -1621,7 +2033,7 @@
		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

-		let chan = FundedChannel {
+		let chan = InboundV1Channel {
			context: ChannelContext {
				user_id,
@@ -1752,643 +2164,561 @@
		Ok(chan)
	}
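Note that `new_from_req` now yields an `InboundV1Channel` rather than a fully funded channel; the funding handshake later consumes that value and promotes it to a `FundedChannel`, handing `self` back on failure (see `funding_created` further below). A minimal sketch of that consume-and-promote shape, under hypothetical names:

```rust
// Hypothetical sketch of the by-value phase transition: the unfunded type is
// consumed, and on error the caller gets it back unchanged.
struct InboundV1 { accepted: bool }
struct Funded;

impl InboundV1 {
    fn funding_created(self) -> Result<Funded, (InboundV1, String)> {
        if !self.accepted {
            // Hand `self` back so the caller can still use (or close) the channel.
            return Err((self, "not yet accepted".to_owned()));
        }
        Ok(Funded)
    }
}

fn main() {
    let chan = InboundV1 { accepted: false };
    let chan = match chan.funding_created() {
        Ok(_funded) => return,
        Err((chan, e)) => { println!("rejected: {}", e); chan }
    };
    assert!(!chan.accepted); // still usable after the failed attempt
}
```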
- #[inline] - fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats - where L::Target: Logger - { - let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); - let num_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); - let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); - - let broadcaster_dust_limit_satoshis = if local { self.context.holder_dust_limit_satoshis } else { self.context.counterparty_dust_limit_satoshis }; - let mut remote_htlc_total_msat = 0; - let mut local_htlc_total_msat = 0; - let mut value_to_self_msat_offset = 0; + fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { + let funding_script = self.get_funding_redeemscript(); - let mut feerate_per_kw = self.context.feerate_per_kw; - if let Some((feerate, update_state)) = self.context.pending_update_fee { - if match update_state { - // Note that these match the inclusion criteria when scanning - // pending_inbound_htlcs below. - FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, - } { - feerate_per_kw = feerate; - } + let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + // They sign the holder commitment transaction... + log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", + log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), + encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), + encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); + secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } - log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", - commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), - get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), - log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - macro_rules! 
get_htlc_in_commitment { - ($htlc: expr, $offered: expr) => { - HTLCOutputInCommitment { - offered: $offered, - amount_msat: $htlc.amount_msat, - cltv_expiry: $htlc.cltv_expiry, - payment_hash: $htlc.payment_hash, - transaction_output_index: None - } - } - } + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - macro_rules! add_htlc_output { - ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => { - if $outbound == local { // "offered HTLC output" - let htlc_in_tx = get_htlc_in_commitment!($htlc, true); - let htlc_tx_fee = if self.opt_anchors() { - 0 - } else { - feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000 - }; - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_non_dust_htlcs.push((htlc_in_tx, $source)); - } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_dust_htlcs.push((htlc_in_tx, $source)); - } - } else { - let htlc_in_tx = get_htlc_in_commitment!($htlc, false); - let htlc_tx_fee = if self.opt_anchors() { - 0 - } else { - feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000 - }; - if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_non_dust_htlcs.push((htlc_in_tx, $source)); - } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); - included_dust_htlcs.push((htlc_in_tx, $source)); - } - } - } + let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; + + // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. 
+	pub fn funding_created<SP: Deref, L: Deref>(
+		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
+	) -> Result<(FundedChannel<Signer>, msgs::FundingSigned, ChannelMonitor<Signer>), (Self, ChannelError)>
+	where
+		SP::Target: SignerProvider,
+		L::Target: Logger
+	{
+		if self.is_outbound() {
+			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+		}
+		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
+			// remember the channel, so it's safe to just send an error_message here and drop the
+			// channel.
+			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+		}
+		if self.context.inbound_awaiting_accept {
+			return Err((self, ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())));
+		}
+		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

-		for ref htlc in self.context.pending_inbound_htlcs.iter() {
-			let (include, state_name) = match htlc.state {
-				InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
-				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
-				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
-				InboundHTLCState::Committed => (true, "Committed"),
-				InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
-			};

+		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
+		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+		// This is an externally observable change before we finish all our checks. In particular
+		// funding_created_signature may fail.
+		self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);

-			if include {
-				add_htlc_output!(htlc, false, None, state_name);
-				remote_htlc_total_msat += htlc.amount_msat;
-			} else {
-				log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
-				match &htlc.state {
-					&InboundHTLCState::LocalRemoved(ref reason) => {
-						if generated_by_local {
-							if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
-								value_to_self_msat_offset += htlc.amount_msat as i64;
-							}
-						}
-					},
-					_ => {},
-				}
			}

+		let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+			Ok(res) => res,
+			Err(ChannelError::Close(e)) => {
+				self.context.channel_transaction_parameters.funding_outpoint = None;
+				return Err((self, ChannelError::Close(e)));
+			},
+			Err(e) => {
+				// The only error we know how to handle is ChannelError::Close, so we fall over here
+				// to make sure we don't continue with an inconsistent state.
+ panic!("unexpected error type from funding_created_signature {:?}", e); } + }; + + let holder_commitment_tx = HolderCommitmentTransaction::new( + initial_commitment_tx, + msg.signature, + Vec::new(), + &self.get_holder_pubkeys().funding_pubkey, + self.counterparty_funding_pubkey() + ); + + if let Err(_) = self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) { + return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned()))); } - let mut preimages: Vec = Vec::new(); + // Now that we're past error-generating stuff, update our local state: - for ref htlc in self.context.pending_outbound_htlcs.iter() { - let (include, state_name) = match htlc.state { - OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"), - OutboundHTLCState::Committed => (true, "Committed"), - OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"), - OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"), - OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"), - }; + let mut channel = FundedChannel { + context: self.context, + }; - let preimage_opt = match htlc.state { - OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p, - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p, - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p, - _ => None, - }; + let funding_redeemscript = channel.get_funding_redeemscript(); + let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&channel.get_holder_pubkeys().payment_point, &channel.get_counterparty_pubkeys().payment_point, channel.is_outbound()); + let shutdown_script = channel.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(channel.context.channel_value_satoshis, channel.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&channel.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(channel.context.secp_ctx.clone(), monitor_signer, + shutdown_script, channel.get_holder_selected_contest_delay(), + &channel.context.destination_script, (funding_txo, funding_txo_script.clone()), + &channel.context.channel_transaction_parameters, + funding_redeemscript.clone(), channel.context.channel_value_satoshis, + obscure_factor, + holder_commitment_tx, best_block, channel.context.counterparty_node_id); - if let Some(preimage) = preimage_opt { - preimages.push(preimage); - } + channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), channel.context.cur_counterparty_commitment_transaction_number, channel.context.counterparty_cur_commitment_point.unwrap(), logger); - if include { - add_htlc_output!(htlc, true, Some(&htlc.source), state_name); - local_htlc_total_msat += htlc.amount_msat; - } else { - log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); - match htlc.state { - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { - value_to_self_msat_offset -= htlc.amount_msat as i64; - }, - 
OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => { - if !generated_by_local { - value_to_self_msat_offset -= htlc.amount_msat as i64; - } - }, - _ => {}, - } - } - } + channel.context.channel_state = ChannelState::FundingSent as u32; + channel.context.channel_id = funding_txo.to_channel_id(); + channel.context.cur_counterparty_commitment_transaction_number -= 1; + channel.context.cur_holder_commitment_transaction_number -= 1; - let mut value_to_self_msat: i64 = (self.context.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset; - assert!(value_to_self_msat >= 0); - // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie - // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to - // "violate" their reserve value by couting those against it. Thus, we have to convert - // everything to i64 before subtracting as otherwise we can overflow. - let mut value_to_remote_msat: i64 = (self.context.channel_value_satoshis * 1000) as i64 - (self.context.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset; - assert!(value_to_remote_msat >= 0); + log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(channel.channel_id())); - #[cfg(debug_assertions)] - { - // Make sure that the to_self/to_remote is always either past the appropriate - // channel_reserve *or* it is making progress towards it. - let mut broadcaster_max_commitment_tx_output = if generated_by_local { - self.context.holder_max_commitment_tx_output.lock().unwrap() - } else { - self.context.counterparty_max_commitment_tx_output.lock().unwrap() - }; - debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.context.counterparty_selected_channel_reserve_satoshis.unwrap() as i64); - broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64); - debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.context.holder_selected_channel_reserve_satoshis as i64); - broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64); - } + let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + let channel_id = channel.context.channel_id; - let total_fee_sat = FundedChannel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.context.channel_transaction_parameters.opt_anchors.is_some()); - let anchors_val = if self.context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; - let (value_to_self, value_to_remote) = if self.is_outbound() { - (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) - } else { - (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) - }; + Ok((channel, msgs::FundingSigned { + channel_id, + signature + }, channel_monitor)) + } - let mut value_to_a = if local { value_to_self } else { value_to_remote }; - let mut value_to_b = if local { value_to_remote } else { value_to_self }; - let (funding_pubkey_a, funding_pubkey_b) = if local { - (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey) - } else { - 
(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey) - }; + pub fn inbound_is_awaiting_accept(&self) -> bool { + self.context.inbound_awaiting_accept + } - if value_to_a >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a); - } else { - value_to_a = 0; - } + /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` + pub fn set_0conf(&mut self) { + assert!(self.context.inbound_awaiting_accept); + self.context.minimum_depth = Some(0); + } - if value_to_b >= (broadcaster_dust_limit_satoshis as i64) { - log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b); - } else { - value_to_b = 0; + /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which + /// should be sent back to the counterparty node. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { + if self.is_outbound() { + panic!("Tried to send accept_channel for an outbound channel?"); + } + if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { + panic!("Tried to send accept_channel after channel had moved forward"); + } + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an accept_channel for a channel that has already advanced"); + } + if !self.context.inbound_awaiting_accept { + panic!("The inbound channel has already been accepted"); } - let num_nondust_htlcs = included_non_dust_htlcs.len(); + self.context.user_id = user_id; + self.context.inbound_awaiting_accept = false; - let channel_parameters = - if local { self.context.channel_transaction_parameters.as_holder_broadcastable() } - else { self.context.channel_transaction_parameters.as_counterparty_broadcastable() }; - let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number, - value_to_a as u64, - value_to_b as u64, - self.context.channel_transaction_parameters.opt_anchors.is_some(), - funding_pubkey_a, - funding_pubkey_b, - keys.clone(), - feerate_per_kw, - &mut included_non_dust_htlcs, - &channel_parameters - ); - let mut htlcs_included = included_non_dust_htlcs; - // The unwrap is safe, because all non-dust HTLCs have been assigned an output index - htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); - htlcs_included.append(&mut included_dust_htlcs); + self.generate_accept_channel_message() + } - // For the stats, trimmed-to-0 the value in msats accordingly - value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat }; - value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat }; + /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an + /// inbound channel. If the intention is to accept an inbound channel, use + /// [`Channel::accept_inbound_channel`] instead. 
+ /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.get_holder_pubkeys(); - CommitmentStats { - tx, - feerate_per_kw, - total_fee_sat, - num_nondust_htlcs, - htlcs_included, - local_balance_msat: value_to_self_msat as u64, - remote_balance_msat: value_to_remote_msat as u64, - preimages + msgs::AcceptChannel { + temporary_channel_id: self.context.channel_id, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + minimum_depth: self.context.minimum_depth.unwrap(), + to_self_delay: self.get_holder_selected_contest_delay(), + max_accepted_htlcs: OUR_MAX_HTLCS, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + shutdown_scriptpubkey: OptionalField::Present(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), } } - #[inline] - fn get_closing_scriptpubkey(&self) -> Script { - // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script - // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method - // outside of those situations will fail. - self.context.shutdown_scriptpubkey.clone().unwrap().into_inner() + /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an + /// inbound channel without accepting it. + /// + /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel + #[cfg(test)] + pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel { + self.generate_accept_channel_message() } +} - #[inline] - fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 { - let mut ret = - (4 + // version - 1 + // input count - 36 + // prevout - 1 + // script length (0) - 4 + // sequence - 1 + // output count - 4 // lock time - )*4 + // * 4 for non-witness parts - 2 + // witness marker and flag - 1 + // witness element count - 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script) - self.get_funding_redeemscript().len() as u64 + // funding witness script - 2*(1 + 71); // two signatures + sighash type flags - if let Some(spk) = a_scriptpubkey { - ret += ((8+1) + // output values and script length - spk.len() as u64) * 4; // scriptpubkey and witness multiplier +// A not-yet-funded outbound (from holder) channel using V1 channel establishment. 
+pub(super) struct OutboundV1Channel<Signer: WriteableEcdsaChannelSigner> {
+	#[cfg(not(test))]
+	context: ChannelContext<Signer>,
+	#[cfg(test)]
+	pub context: ChannelContext<Signer>,
+}
+
+impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
+	pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
+		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
+		outbound_scid_alias: u64
+	) -> Result<OutboundV1Channel<Signer>, APIError>
+	where ES::Target: EntropySource,
+	      SP::Target: SignerProvider,
+	      F::Target: FeeEstimator,
+	{
+		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+		let pubkeys = holder_signer.pubkeys().clone();
+
+		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+		}
+		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+		}
+		let channel_value_msat = channel_value_satoshis * 1000;
+		if push_msat > channel_value_msat {
+			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+		}
+		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
+		}
+		let holder_selected_channel_reserve_satoshis = FundedChannel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+			// Protocol level safety check in place, although it should never happen because
+			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve ({}) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
		}

+		let channel_type = Self::get_initial_channel_type(&config, their_features);
+		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

+		let feerate = 
fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - if value_to_holder < 0 { - assert!(self.is_outbound()); - total_fee_satoshis += (-value_to_holder) as u64; - } else if value_to_counterparty < 0 { - assert!(!self.is_outbound()); - total_fee_satoshis += (-value_to_counterparty) as u64; + let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; + let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx()); + if value_to_self_msat < commitment_tx_fee { + return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); } - if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis { - value_to_counterparty = 0; - } + let mut secp_ctx = Secp256k1::new(); + secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); - if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis { - value_to_holder = 0; + let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { + Some(signer_provider.get_shutdown_scriptpubkey()) + } else { None }; + + if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { + if !shutdown_scriptpubkey.is_compatible(&their_features) { + return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); + } } - assert!(self.context.shutdown_scriptpubkey.is_some()); - let holder_shutdown_script = self.get_closing_scriptpubkey(); - let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap(); - let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint(); + Ok(OutboundV1Channel { + context: ChannelContext { + user_id, - let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint); - (closing_transaction, total_fee_satoshis) - } + config: LegacyChannelConfig { + options: config.channel_config.clone(), + announced_channel: config.channel_handshake_config.announced_channel, + commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey, + }, - fn funding_outpoint(&self) -> OutPoint { - self.context.channel_transaction_parameters.funding_outpoint.unwrap() - } + prev_config: None, - #[inline] - /// Creates a set of keys for build_commitment_transaction to generate a transaction which our - /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to - /// our counterparty!) - /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) - /// TODO Some magic rust shit to compile-time check this? 
- fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { - let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx); - let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; - let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.get_counterparty_pubkeys(); + inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint) - } + channel_id: entropy_source.get_secure_random_bytes(), + channel_state: ChannelState::OurInitSent as u32, + announcement_sigs_state: AnnouncementSigsState::NotSent, + secp_ctx, + channel_value_satoshis, - #[inline] - /// Creates a set of keys for build_commitment_transaction to generate a transaction which we - /// will sign and send to our counterparty. - /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) - fn build_remote_transaction_keys(&self) -> TxCreationKeys { - //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we - //may see payments to it! - let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; - let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; - let counterparty_pubkeys = self.get_counterparty_pubkeys(); + latest_monitor_update_id: 0, - TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint) - } + holder_signer, + shutdown_scriptpubkey, + destination_script: signer_provider.get_destination_script(), - /// Gets the redeemscript for the funding transaction output (ie the funding transaction output - /// pays to get_funding_redeemscript().to_v0_p2wsh()). - /// Panics if called before accept_channel/new_from_req - pub fn get_funding_redeemscript(&self) -> Script { - make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey()) - } + cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, + value_to_self_msat, - /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] - /// entirely. - /// - /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage - /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]). - /// - /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is - /// disconnected). - pub fn claim_htlc_while_disconnected_dropping_mon_update - (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) - where L::Target: Logger { - // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc` - // (see equivalent if condition there). 
- assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0); - let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update - let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger); - self.context.latest_monitor_update_id = mon_update_id; - if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { - assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. - } - } + pending_inbound_htlcs: Vec::new(), + pending_outbound_htlcs: Vec::new(), + holding_cell_htlc_updates: Vec::new(), + pending_update_fee: None, + holding_cell_update_fee: None, + next_holder_htlc_id: 0, + next_counterparty_htlc_id: 0, + update_time_counter: 1, - fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger { - // Either ChannelReady got set (which means it won't be unset) or there is no way any - // caller thought we could have something claimed (cause we wouldn't have accepted in an - // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, - // either. - if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { - panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); - } - assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); + resend_order: RAACommitmentOrder::CommitmentFirst, - let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()); + monitor_pending_channel_ready: false, + monitor_pending_revoke_and_ack: false, + monitor_pending_commitment_signed: false, + monitor_pending_forwards: Vec::new(), + monitor_pending_failures: Vec::new(), + monitor_pending_finalized_fulfills: Vec::new(), - // ChannelManager may generate duplicate claims/fails due to HTLC update events from - // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop - // these, but for now we just have to treat them as normal. 
+ #[cfg(debug_assertions)] + holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)), - let mut pending_idx = core::usize::MAX; - let mut htlc_value_msat = 0; - for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { - if htlc.htlc_id == htlc_id_arg { - assert_eq!(htlc.payment_hash, payment_hash_calc); - match htlc.state { - InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id())); - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - } - return UpdateFulfillFetch::DuplicateClaim {}; - }, - _ => { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - // Don't return in release mode here so that we can update channel_monitor - } - } - pending_idx = idx; - htlc_value_msat = htlc.amount_msat; - break; + last_sent_closing_fee: None, + pending_counterparty_closing_signed: None, + closing_fee_limits: None, + target_closing_feerate_sats_per_kw: None, + + inbound_awaiting_accept: false, + + funding_tx_confirmed_in: None, + funding_tx_confirmation_height: 0, + short_channel_id: None, + channel_creation_height: current_chain_height, + + feerate_per_kw: feerate, + counterparty_dust_limit_satoshis: 0, + holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS, + counterparty_max_htlc_value_in_flight_msat: 0, + holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config), + counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel + holder_selected_channel_reserve_satoshis, + counterparty_htlc_minimum_msat: 0, + holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, + counterparty_max_accepted_htlcs: 0, + minimum_depth: None, // Filled in in accept_channel + + counterparty_forwarding_info: None, + + channel_transaction_parameters: ChannelTransactionParameters { + holder_pubkeys: pubkeys, + holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay, + is_outbound_from_holder: true, + counterparty_parameters: None, + funding_outpoint: None, + opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None }, + opt_non_zero_fee_anchors: None + }, + funding_transaction: None, + + counterparty_cur_commitment_point: None, + counterparty_prev_commitment_point: None, + counterparty_node_id, + + counterparty_shutdown_scriptpubkey: None, + + commitment_secrets: CounterpartyCommitmentSecrets::new(), + + channel_update_status: ChannelUpdateStatus::Enabled, + closing_signed_in_flight: false, + + announcement_sigs: None, + + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + + workaround_lnd_bug_4006: None, + + latest_inbound_scid_alias: None, + outbound_scid_alias, + + channel_ready_event_emitted: false, + + #[cfg(any(test, fuzzing))] + historical_inbound_htlc_fulfills: HashSet::new(), + + 
channel_type,
+				channel_keys_id,
+
+				pending_monitor_updates: Vec::new(),
+			}
+		})
+	}

+	/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+	fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+		let counterparty_keys = self.build_remote_transaction_keys();
+		let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+		Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+			.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+	}

+	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
+	/// a funding_created message for the remote peer.
+	/// Panics if called at some time other than immediately after initial handshake, if called twice,
+	/// or if called on an inbound channel.
+	/// Note that channel_id changes during this call!
+	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
+	/// If an Err is returned, it is a ChannelError::Close.
+	pub fn get_outbound_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<(FundedChannel<Signer>, msgs::FundingCreated), (OutboundV1Channel<Signer>, ChannelError)> where L::Target: Logger {
+		if !self.is_outbound() {
+			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
-		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
-			// this is simply a duplicate claim, not previously failed and we lost funds.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-			return UpdateFulfillFetch::DuplicateClaim {};
+		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
+		}
+		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

-		// Now update local state:
-		//
-		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
-		// can claim it even if the channel hits the chain before we see their next commitment.
- self.context.latest_monitor_update_id += 1; - let monitor_update = ChannelMonitorUpdate { - update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: payment_preimage_arg.clone(), - }], - }; + self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); + self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { - // Note that this condition is the same as the assertion in - // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly - - // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we - // do not not get into this branch. - for pending_update in self.context.holding_cell_htlc_updates.iter() { - match pending_update { - &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { - if htlc_id_arg == htlc_id { - // Make sure we don't leave latest_monitor_update_id incremented here: - self.context.latest_monitor_update_id -= 1; - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return UpdateFulfillFetch::DuplicateClaim {}; - } - }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { - if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id())); - // TODO: We may actually be able to switch to a fulfill here, though its - // rare enough it may not be worth the complexity burden. - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; - } - }, - _ => {} - } + let signature = match self.get_outbound_funding_created_signature(logger) { + Ok(res) => res, + Err(e) => { + log_error!(logger, "Got bad signatures: {:?}!", e); + self.context.channel_transaction_parameters.funding_outpoint = None; + return Err((self, e)); } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state); - self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { - payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, - }); - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; - } - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); + }; - { - let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - if let InboundHTLCState::Committed = htlc.state { - } else { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; - } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); - } + let temporary_channel_id = self.context.channel_id; - UpdateFulfillFetch::NewClaim { - monitor_update, - htlc_value_msat, - msg: Some(msgs::UpdateFulfillHTLC { - channel_id: self.channel_id(), - htlc_id: htlc_id_arg, - payment_preimage: payment_preimage_arg, - }), - } - } + // Now that we're past error-generating stuff, update our local state: - pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger { - match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) { - UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => { - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - self.context.pending_monitor_updates.push(monitor_update); - UpdateFulfillCommitFetch::NewClaim { - monitor_update: self.context.pending_monitor_updates.last().unwrap(), - htlc_value_msat, - } - }, - UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => { - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - self.context.pending_monitor_updates.push(monitor_update); - UpdateFulfillCommitFetch::NewClaim { - monitor_update: self.context.pending_monitor_updates.last().unwrap(), - htlc_value_msat, - } - } - UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, - } - } + let mut channel = FundedChannel { + context: self.context, + }; - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. 
-	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
-	-> Result<(), ChannelError> where L::Target: Logger {
-		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
-			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))

+		channel.context.channel_state = ChannelState::FundingCreated as u32;
+		channel.context.channel_id = funding_txo.to_channel_id();
+		channel.context.funding_transaction = Some(funding_transaction);
+
+		Ok((channel, msgs::FundingCreated {
+			temporary_channel_id,
+			funding_txid: funding_txo.txid,
+			funding_output_index: funding_txo.index,
+			signature
+		}))
	}

-	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
-	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
-	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
-	/// before we fail backwards.
-	///
-	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
-	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
-	/// [`ChannelError::Ignore`].
-	fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-	-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
-		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			panic!("Was asked to fail an HTLC when channel was not in an operational state");

+	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+		// The default channel type (ie the first one we try) depends on whether the channel is
+		// public - if it is, we just go with `only_static_remotekey` as it's the only option
+		// available. If it's private, we first try `scid_privacy` as it provides better privacy
+		// with no other changes, and fall back to `only_static_remotekey`.
+		let mut ret = ChannelTypeFeatures::only_static_remote_key();
+		if !config.channel_handshake_config.announced_channel &&
+			config.channel_handshake_config.negotiate_scid_privacy &&
+			their_features.supports_scid_privacy() {
+			ret.set_scid_privacy_required();
		}

-		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
-		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
-		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
-		// these, but for now we just have to treat them as normal.
-		let mut pending_idx = core::usize::MAX;
-		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
-			if htlc.htlc_id == htlc_id_arg {
-				match htlc.state {
-					InboundHTLCState::Committed => {},
-					InboundHTLCState::LocalRemoved(ref reason) => {
-						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
-						} else {
-							debug_assert!(false, "Tried to fail an HTLC that was already failed");
-						}
-						return Ok(None);
-					},
-					_ => {
-						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
-					}
-				}
-				pending_idx = idx;

+		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+		// set it now. If they don't understand it, we'll fall back to our default of
+		// `only_static_remotekey`.
+		#[cfg(anchors)]
+		{ // Attributes are not allowed on if expressions on our current MSRV of 1.41.
+			if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+				their_features.supports_anchors_zero_fee_htlc_tx() {
+				ret.set_anchors_zero_fee_htlc_tx_required();
+			}
+		}

-		if pending_idx == core::usize::MAX {
-			#[cfg(any(test, fuzzing))]
-			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
-			// is simply a duplicate fail, not previously failed and we failed-back too early.
-			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-			return Ok(None);
-		}
-		if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
-			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
-			force_holding_cell = true;
+		ret
+	}

+	/// If we receive an error message, it may only be a rejection of the channel type we tried,
+	/// not of our ability to open any channel at all. Thus, on error, we should first call this
+	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
+	pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
+		if !self.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+			// We've exhausted our options
+			return Err(());
+		}
+		// We support opening a few different types of channels. Try removing our additional
+		// features one by one until we've either arrived at our default or the counterparty has
+		// accepted one.
+		//
+		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+		// checks whether the counterparty supports every feature, this would only happen if the
+		// counterparty is advertising the feature, but rejecting channels proposing the feature for
+		// whatever reason.
+		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
+			assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
+			self.context.channel_transaction_parameters.opt_anchors = None;
+		} else if self.context.channel_type.supports_scid_privacy() {
+			self.context.channel_type.clear_scid_privacy();
+		} else {
+			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
+		Ok(self.get_open_channel(chain_hash))
+	}

-		// Now update local state:
-		if force_holding_cell {
-			for pending_update in self.context.holding_cell_htlc_updates.iter() {
-				match pending_update {
-					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
-						if htlc_id_arg == htlc_id {
-							#[cfg(any(test, fuzzing))]
-							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-							return Ok(None);
-						}
-					},
-					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ..
} => { - if htlc_id_arg == htlc_id { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); - } - }, - _ => {} - } - } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); - self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { - htlc_id: htlc_id_arg, - err_packet, - }); - return Ok(None); + pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { + if !self.is_outbound() { + panic!("Tried to open a channel for an inbound channel?"); + } + if self.context.channel_state != ChannelState::OurInitSent as u32 { + panic!("Cannot generate an open_channel after we've moved forward"); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); - { - let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); + if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Tried to send an open_channel for a channel that has already advanced"); } - Ok(Some(msgs::UpdateFailHTLC { - channel_id: self.channel_id(), - htlc_id: htlc_id_arg, - reason: err_packet - })) - } + let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let keys = self.get_holder_pubkeys(); - // Message handlers: + msgs::OpenChannel { + chain_hash, + temporary_channel_id: self.context.channel_id, + funding_satoshis: self.context.channel_value_satoshis, + push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, + dust_limit_satoshis: self.context.holder_dust_limit_satoshis, + max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, + channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, + htlc_minimum_msat: self.context.holder_htlc_minimum_msat, + feerate_per_kw: self.context.feerate_per_kw as u32, + to_self_delay: self.get_holder_selected_contest_delay(), + max_accepted_htlcs: OUR_MAX_HTLCS, + funding_pubkey: keys.funding_pubkey, + revocation_basepoint: keys.revocation_basepoint, + payment_point: keys.payment_point, + delayed_payment_basepoint: keys.delayed_payment_basepoint, + htlc_basepoint: keys.htlc_basepoint, + first_per_commitment_point, + channel_flags: if self.context.config.announced_channel {1} else {0}, + shutdown_scriptpubkey: OptionalField::Present(match &self.context.shutdown_scriptpubkey { + Some(script) => script.clone().into_inner(), + None => Builder::new().into_script(), + }), + channel_type: Some(self.context.channel_type.clone()), + } + } + // Message handlers pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; @@ -2482,251 +2812,448 @@ impl FundedChannel { return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned()));
 }
 }
- } else { None };
-
- self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
- self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
-
- if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.minimum_depth);
- } else {
- self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ } else { None };
+
+ self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
+ self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+ self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
+ self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
+ self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
+
+ if peer_limits.trust_own_funding_0conf {
+ self.context.minimum_depth = Some(msg.minimum_depth);
+ } else {
+ self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ }
+
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: msg.revocation_basepoint,
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: msg.delayed_payment_basepoint,
+ htlc_basepoint: msg.htlc_basepoint
+ };
+
+ self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
+
+ self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+ self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+ self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+ Ok(())
+ }
+}
+
+// Holder designates channel data owned for the benefit of the user client.
+// Counterparty designates channel data owned by the other channel participant entity.
+pub(super) struct FundedChannel<Signer: WriteableEcdsaChannelSigner> {
+ #[cfg(not(test))]
+ context: ChannelContext<Signer>,
+ #[cfg(test)]
+ pub context: ChannelContext<Signer>,
+}
+
+#[cfg(any(test, fuzzing))]
+struct CommitmentTxInfoCached {
+ fee: u64,
+ total_pending_htlcs: usize,
+ next_holder_htlc_id: u64,
+ next_counterparty_htlc_id: u64,
+ feerate: u32,
+}
+
+impl<Signer: WriteableEcdsaChannelSigner> FundedChannel<Signer> {
+ fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
+ feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
+ -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
+ {
+ // We only bound the fee updates on the upper side to prevent completely absurd feerates,
+ // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
+ // We generally don't care too much if they set the feerate to something very high, but it
+ // could result in the channel being useless due to everything being dust.
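+ // Worked example (the estimator value here is hypothetical): 25 sat/vByte is
+ // 25 * 250 = 6_250 sat/kW, so with a "High Priority" estimate of 5_000 sat/kW the
+ // bound below works out to max(6_250, 50_000) = 50_000 sat/kW.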
+ let upper_limit = cmp::max(250 * 25, + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10); + if feerate_per_kw as u64 > upper_limit { + return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit))); + } + let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); + // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing + // occasional issues with feerate disagreements between an initiator that wants a feerate + // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250 + // sat/kw before the comparison here. + if feerate_per_kw + 250 < lower_limit { + if let Some(cur_feerate) = cur_feerate_per_kw { + if feerate_per_kw > cur_feerate { + log_warn!(logger, + "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.", + cur_feerate, feerate_per_kw); + return Ok(()); + } + } + return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit))); } + Ok(()) + } - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.funding_pubkey, - revocation_basepoint: msg.revocation_basepoint, - payment_point: msg.payment_point, - delayed_payment_basepoint: msg.delayed_payment_basepoint, - htlc_basepoint: msg.htlc_basepoint - }; - - self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.to_self_delay, - pubkeys: counterparty_pubkeys, - }); - - self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point); - self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; - - self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32; - self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. + #[inline] + fn get_closing_scriptpubkey(&self) -> Script { + // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script + // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method + // outside of those situations will fail. 
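+ // Concretely, "fail" means the unwrap() below panics if no shutdown scriptpubkey
+ // has been set yet.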
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }

- Ok(())

+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ ret
 }

- fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
- let funding_script = self.get_funding_redeemscript();
+ #[inline]
+ fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+ assert!(self.context.pending_inbound_htlcs.is_empty());
+ assert!(self.context.pending_outbound_htlcs.is_empty());
+ assert!(self.context.pending_update_fee.is_none());

- let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign the holder commitment transaction...
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", - log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), - encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.channel_id())); - secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); + let mut total_fee_satoshis = proposed_total_fee_satoshis; + let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 }; + let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 }; + + if value_to_holder < 0 { + assert!(self.is_outbound()); + total_fee_satoshis += (-value_to_holder) as u64; + } else if value_to_counterparty < 0 { + assert!(!self.is_outbound()); + total_fee_satoshis += (-value_to_counterparty) as u64; } - let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis { + value_to_counterparty = 0; + } - let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); - let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis { + value_to_holder = 0; + } - let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; + assert!(self.context.shutdown_scriptpubkey.is_some()); + let holder_shutdown_script = self.get_closing_scriptpubkey(); + let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap(); + let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint(); - // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. 
- Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
+ let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+ (closing_transaction, total_fee_satoshis)
 }

- fn counterparty_funding_pubkey(&self) -> &PublicKey {
- &self.get_counterparty_pubkeys().funding_pubkey
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
 }

- pub fn funding_created<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
- {
- if self.is_outbound() {
- return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
- }
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
- // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
- // remember the channel, so it's safe to just send an error_message here and drop the
- // channel.
- return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
- }
- if self.context.inbound_awaiting_accept {
- return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
- }
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ self.context.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
 }
+ }

- let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
- self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
- // This is an externally observable change before we finish all our checks. In particular
- // funding_created_signature may fail.
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-
- let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
- Ok(res) => res,
- Err(ChannelError::Close(e)) => {
- self.context.channel_transaction_parameters.funding_outpoint = None;
- return Err(ChannelError::Close(e));
- },
- Err(e) => {
- // The only error we know how to handle is ChannelError::Close, so we fall over here
- // to make sure we don't continue with an inconsistent state.
- panic!("unexpected error type from funding_created_signature {:?}", e);
- }
- };
-
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
- msg.signature,
- Vec::new(),
- &self.get_holder_pubkeys().funding_pubkey,
- self.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (cause we wouldn't have accepted an
+ // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+ // either.
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

- // Now that we're past error-generating stuff, update our local state:
+ let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());

- let funding_redeemscript = self.get_funding_redeemscript();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
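+ // In the scan below, `core::usize::MAX` acts as a "not found" sentinel; if no
+ // pending inbound HTLC matches `htlc_id_arg`, the duplicate-claim check after the
+ // loop handles it.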
- channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+ let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ assert_eq!(htlc.payment_hash, payment_hash_calc);
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ }
+ return UpdateFulfillFetch::DuplicateClaim {};
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ // Don't return in release mode here so that we can update channel_monitor
+ }
+ }
+ pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
+ break;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, not previously failed and we lost funds.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }

- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.channel_id = funding_txo.to_channel_id();
- self.context.cur_counterparty_commitment_transaction_number -= 1;
- self.context.cur_holder_commitment_transaction_number -= 1;
+ // Now update local state:
+ //
+ // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+ // can claim it even if the channel hits the chain before we see their next commitment.
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
+ };

- log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ // Make sure we don't leave latest_monitor_update_id incremented here:
+ self.context.latest_monitor_update_id -= 1;
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ..
} => {
+ if htlc_id_arg == htlc_id {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
+ // TODO: We may actually be able to switch to a fulfill here, though it's
+ // rare enough it may not be worth the complexity burden.
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state);
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+ });
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ if let InboundHTLCState::Committed = htlc.state {
+ } else {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
+ }

- Ok((msgs::FundingSigned {
- channel_id: self.context.channel_id,
- signature
- }, channel_monitor))
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
 }

- /// Handles a funding_signed message from the remote end.
- /// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<ChannelMonitor<Signer>, ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
- {
- if !self.is_outbound() {
- return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
- }
- if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
- return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
- }
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ self.context.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: self.context.pending_monitor_updates.last().unwrap(),
+ htlc_value_msat,
+ }
+ },
+ UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.context.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: self.context.pending_monitor_updates.last().unwrap(),
+ htlc_value_msat,
+ }
+ }
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
 }
+ }

- let funding_script = self.get_funding_redeemscript();
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
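+ ///
+ /// For example, a second fail for the same HTLC arriving after a reconnect hits the
+ /// `InboundHTLCState::LocalRemoved` handling in `fail_htlc` and resolves to `Ok(None)`
+ /// rather than surfacing an error.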
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }

- let counterparty_keys = self.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);

- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.

- let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) { - return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); + let mut pending_idx = core::usize::MAX; + for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { + if htlc.htlc_id == htlc_id_arg { + match htlc.state { + InboundHTLCState::Committed => {}, + InboundHTLCState::LocalRemoved(ref reason) => { + if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + } else { + debug_assert!(false, "Tried to fail an HTLC that was already failed"); + } + return Ok(None); + }, + _ => { + debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); + return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id))); + } + } + pending_idx = idx; } } + if pending_idx == core::usize::MAX { + #[cfg(any(test, fuzzing))] + // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this + // is simply a duplicate fail, not previously failed and we failed-back too early. + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return Ok(None); + } - let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, - msg.signature, - Vec::new(), - &self.get_holder_pubkeys().funding_pubkey, - self.counterparty_funding_pubkey() - ); - - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - - - let funding_redeemscript = self.get_funding_redeemscript(); - let funding_txo = self.get_funding_txo().unwrap(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!"); + force_holding_cell = true; + } - assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! 
- self.context.channel_state = ChannelState::FundingSent as u32; - self.context.cur_holder_commitment_transaction_number -= 1; - self.context.cur_counterparty_commitment_transaction_number -= 1; + // Now update local state: + if force_holding_cell { + for pending_update in self.context.holding_cell_htlc_updates.iter() { + match pending_update { + &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { + if htlc_id_arg == htlc_id { + #[cfg(any(test, fuzzing))] + debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); + return Ok(None); + } + }, + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { + if htlc_id_arg == htlc_id { + debug_assert!(false, "Tried to fail an HTLC that was already failed"); + return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); + } + }, + _ => {} + } + } + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { + htlc_id: htlc_id_arg, + err_packet, + }); + return Ok(None); + } - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id())); + { + let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); + } - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); - Ok(channel_monitor) + Ok(Some(msgs::UpdateFailHTLC { + channel_id: self.channel_id(), + htlc_id: htlc_id_arg, + reason: err_packet + })) } + // Message handlers: + /// Handles a channel_ready message from our peer. If we've already sent our channel_ready /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. 
@@ -2794,317 +3321,18 @@ impl<Signer: WriteableEcdsaChannelSigner> FundedChannel<Signer> {
 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

- log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
-
- Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
- }
-
- /// Returns transaction if there is pending funding transaction that is yet to broadcast
- pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
- if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 {
- self.context.funding_transaction.clone()
- } else {
- None
- }
- }
-
- /// Returns a HTLCStats about inbound pending htlcs
- fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
- let mut stats = HTLCStats {
- pending_htlcs: self.context.pending_inbound_htlcs.len() as u32,
- pending_htlcs_value_msat: 0,
- on_counterparty_tx_dust_exposure_msat: 0,
- on_holder_tx_dust_exposure_msat: 0,
- holding_cell_msat: 0,
- on_holder_tx_holding_cell_htlcs_count: 0,
- };
-
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
- let holder_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- for ref htlc in self.context.pending_inbound_htlcs.iter() {
- stats.pending_htlcs_value_msat += htlc.amount_msat;
- if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
- stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
- }
- if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
- stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
- }
- }
- stats
- }
-
- /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
- fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
- let mut stats = HTLCStats {
- pending_htlcs: self.context.pending_outbound_htlcs.len() as u32,
- pending_htlcs_value_msat: 0,
- on_counterparty_tx_dust_exposure_msat: 0,
- on_holder_tx_dust_exposure_msat: 0,
- holding_cell_msat: 0,
- on_holder_tx_holding_cell_htlcs_count: 0,
- };
-
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let counterparty_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis;
- let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis;
- for ref htlc in self.context.pending_outbound_htlcs.iter() {
- stats.pending_htlcs_value_msat += htlc.amount_msat;
- if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
- stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
- }
- if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
- stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
- }
- }
-
- for update in self.context.holding_cell_htlc_updates.iter() {
- if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
- stats.pending_htlcs += 1;
- stats.pending_htlcs_value_msat += amount_msat;
- stats.holding_cell_msat += amount_msat;
- if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
- stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
- }
- if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
- stats.on_holder_tx_dust_exposure_msat += amount_msat;
- } else {
- stats.on_holder_tx_holding_cell_htlcs_count += 1;
- }
- }
- }
- stats
- }
-
- /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
- /// Doesn't bother handling the
- /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
- /// corner case properly.
- pub fn get_available_balances(&self) -> AvailableBalances {
- // Note that we have to handle overflow due to the above case.
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
-
- let mut balance_msat = self.context.value_to_self_msat;
- for ref htlc in self.context.pending_inbound_htlcs.iter() {
- if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
- balance_msat += htlc.amount_msat;
- }
- }
- balance_msat -= outbound_stats.pending_htlcs_value_msat;
-
- let outbound_capacity_msat = cmp::max(self.context.value_to_self_msat as i64
- - outbound_stats.pending_htlcs_value_msat as i64
- - self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
- 0) as u64;
- AvailableBalances {
- inbound_capacity_msat: cmp::max(self.context.channel_value_satoshis as i64 * 1000
- - self.context.value_to_self_msat as i64
- - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
- - self.context.holder_selected_channel_reserve_satoshis as i64 * 1000,
- 0) as u64,
- outbound_capacity_msat,
- next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
- self.context.counterparty_max_htlc_value_in_flight_msat as i64
- - outbound_stats.pending_htlcs_value_msat as i64),
- 0) as u64,
- balance_msat,
- }
- }
-
- pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
- (self.context.holder_selected_channel_reserve_satoshis, self.context.counterparty_selected_channel_reserve_satoshis)
- }
-
- // Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
- // number of pending HTLCs that are on track to be in our next commitment tx, plus an additional
- // HTLC if `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs
- // are excluded.
- fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
- assert!(self.is_outbound());
-
- let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
- (0, 0)
- } else {
- (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
- self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
- };
- let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.holder_dust_limit_satoshis;
-
- let mut addl_htlcs = 0;
- if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
- match htlc.origin {
- HTLCInitiator::LocalOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
- addl_htlcs += 1;
- }
- },
- HTLCInitiator::RemoteOffered => {
- if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
- addl_htlcs += 1;
- }
- }
- }
-
- let mut included_htlcs = 0;
- for ref htlc in self.context.pending_inbound_htlcs.iter() {
- if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
- continue
- }
- // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
- // transaction including this HTLC if it times out before they RAA.
- included_htlcs += 1; - } - - for ref htlc in self.context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { - continue - } - match htlc.state { - OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1, - OutboundHTLCState::Committed => included_htlcs += 1, - OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, - // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment - // transaction won't be generated until they send us their next RAA, which will mean - // dropping any HTLCs in this state. - _ => {}, - } - } - - for htlc in self.context.holding_cell_htlc_updates.iter() { - match htlc { - &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => { - if amount_msat / 1000 < real_dust_limit_timeout_sat { - continue - } - included_htlcs += 1 - }, - _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the - // ack we're guaranteed to never include them in commitment txs anymore. - } - } - - let num_htlcs = included_htlcs + addl_htlcs; - let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors()); - #[cfg(any(test, fuzzing))] - { - let mut fee = res; - if fee_spike_buffer_htlc.is_some() { - fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors()); - } - let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len() - + self.context.holding_cell_htlc_updates.len(); - let commitment_tx_info = CommitmentTxInfoCached { - fee, - total_pending_htlcs, - next_holder_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1, - HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id, - }, - next_counterparty_htlc_id: match htlc.origin { - HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id, - HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1, - }, - feerate: self.context.feerate_per_kw, - }; - *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info); - } - res - } - - // Get the commitment tx fee for the remote's next commitment transaction based on the number of - // pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if - // `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs are - // excluded. 
- fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(!self.is_outbound()); - - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { - (0, 0) - } else { - (self.context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000, - self.context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000) - }; - let real_dust_limit_success_sat = htlc_success_dust_limit + self.context.counterparty_dust_limit_satoshis; - let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis; - - let mut addl_htlcs = 0; - if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; } - match htlc.origin { - HTLCInitiator::LocalOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_success_sat { - addl_htlcs += 1; - } - }, - HTLCInitiator::RemoteOffered => { - if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat { - addl_htlcs += 1; - } - } - } - - // When calculating the set of HTLCs which will be included in their next commitment_signed, all - // non-dust inbound HTLCs are included (as all states imply it will be included) and only - // committed outbound HTLCs, see below. - let mut included_htlcs = 0; - for ref htlc in self.context.pending_inbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { - continue - } - included_htlcs += 1; - } - - for ref htlc in self.context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { - continue - } - // We only include outbound HTLCs if it will not be included in their next commitment_signed, - // i.e. if they've responded to us with an RAA after announcement. - match htlc.state { - OutboundHTLCState::Committed => included_htlcs += 1, - OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1, - OutboundHTLCState::LocalAnnounced { .. 
} => included_htlcs += 1,
- _ => {},
- }
- }
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));

- let num_htlcs = included_htlcs + addl_htlcs;
- let res = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs, self.opt_anchors());
- #[cfg(any(test, fuzzing))]
- {
- let mut fee = res;
- if fee_spike_buffer_htlc.is_some() {
- fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, num_htlcs - 1, self.opt_anchors());
- }
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
- let commitment_tx_info = CommitmentTxInfoCached {
- fee,
- total_pending_htlcs,
- next_holder_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1,
- HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id,
- },
- next_counterparty_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id,
- HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1,
- },
- feerate: self.context.feerate_per_kw,
- };
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
+ Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
+ }
+
+ /// Returns transaction if there is pending funding transaction that is yet to broadcast
+ pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+ if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 {
+ self.context.funding_transaction.clone()
+ } else {
+ None
 }
- res
 }

 pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
@@ -4857,24 +5085,6 @@ impl<Signer: WriteableEcdsaChannelSigner> FundedChannel<Signer> {
 self.context.feerate_per_kw
 }

- pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
- // When calculating our exposure to dust HTLCs, we assume that the channel feerate
- // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
- // whichever is higher. This ensures that we aren't suddenly exposed to significantly
- // more dust balance if the feerate increases when we have several HTLCs pending
- // which are near the dust limit.
- let mut feerate_per_kw = self.context.feerate_per_kw;
- // If there's a pending update fee, use it to ensure we aren't under-estimating
- // potential feerate updates coming soon.
- if let Some((feerate, _)) = self.context.pending_update_fee {
- feerate_per_kw = cmp::max(feerate_per_kw, feerate);
- }
- if let Some(feerate) = outbound_feerate_update {
- feerate_per_kw = cmp::max(feerate_per_kw, feerate);
- }
- cmp::max(2530, feerate_per_kw * 1250 / 1000)
- }
-
 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
 self.context.cur_holder_commitment_transaction_number + 1
 }
@@ -4923,11 +5133,6 @@ impl<Signer: WriteableEcdsaChannelSigner> FundedChannel<Signer> {
 self.context.config.options.forwarding_fee_base_msat
 }

- /// Returns true if we've ever received a message from the remote end for this Channel
- pub fn have_received_message(&self) -> bool {
- self.context.channel_state > (ChannelState::OurInitSent as u32)
- }
-
 /// Returns true if this channel is fully established and not known to be closing.
/// Allowed in any state (including after shutdown) pub fn is_usable(&self) -> bool { @@ -4952,11 +5157,6 @@ impl FundedChannel { self.context.pending_monitor_updates.first() } - /// Returns true if funding_created was sent/received. - pub fn is_funding_initiated(&self) -> bool { - self.context.channel_state >= ChannelState::FundingSent as u32 - } - /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor. /// If the channel is outbound, this implies we have not yet broadcasted the funding /// transaction. If the channel is inbound, this implies simply that the channel has not @@ -5200,265 +5400,92 @@ impl FundedChannel { if *cltv_expiry <= unforwarded_htlc_cltv_limit { timed_out_htlcs.push((source.clone(), payment_hash.clone())); false - } else { true } - }, - _ => true - } - }); - - self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); - - if let Some(channel_ready) = self.check_get_channel_ready(height) { - let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { - self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) - } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); - return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); - } - - let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= ChannelState::ChannelReady as u32 || - (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { - let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; - if self.context.funding_tx_confirmation_height == 0 { - // Note that check_get_channel_ready may reset funding_tx_confirmation_height to - // zero if it has been reorged out, however in either case, our state flags - // indicate we've already sent a channel_ready - funding_tx_confirmations = 0; - } - - // If we've sent channel_ready (or have both sent and received channel_ready), and - // the funding transaction has become unconfirmed, - // close the channel and hope we can get the latest state on chain (because presumably - // the funding transaction is at least still in the mempool of most nodes). - // - // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or - // 0-conf channel, but not doing so may lead to the - // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have - // to. - if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() { - let err_reason = format!("Funding transaction was un-confirmed. 
Locked at {} confs, now have {} confs.",
- self.context.minimum_depth.unwrap(), funding_tx_confirmations);
- return Err(ClosureReason::ProcessingError { err: err_reason });
- }
- } else if !self.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
- height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
- log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
- // If funding_tx_confirmed_in is unset, the channel must not be active
- assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
- assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
- return Err(ClosureReason::FundingTimedOut);
- }
-
- let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
- self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
- } else { None };
- Ok((None, timed_out_htlcs, announcement_sigs))
- }
-
- /// Indicates the funding transaction is no longer confirmed in the main chain. This may
- /// force-close the channel, but may also indicate a harmless reorganization of a block or two
- /// before the channel has reached channel_ready and we can just wait for more blocks.
- pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
- if self.context.funding_tx_confirmation_height != 0 {
- // We handle the funding disconnection by calling best_block_updated with a height one
- // below where our funding was connected, implying a reorg back to conf_height - 1.
- let reorg_height = self.context.funding_tx_confirmation_height - 1;
- // We use the time field to bump the current time we set on channel updates if its
- // larger. If we don't know that time has moved forward, we can just set it to the last
- // time we saw and it will be ignored.
- let best_time = self.context.update_time_counter; - match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) { - Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { - assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); - assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); - assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?"); - Ok(()) - }, - Err(e) => Err(e) - } - } else { - // We never learned about the funding confirmation anyway, just ignore - Ok(()) - } - } - - // Methods to get unprompted messages to send to the remote end (or where we already returned - // something in the handler for the message that prompted this message): - - pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { - if !self.is_outbound() { - panic!("Tried to open a channel for an inbound channel?"); - } - if self.context.channel_state != ChannelState::OurInitSent as u32 { - panic!("Cannot generate an open_channel after we've moved forward"); - } - - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an open_channel for a channel that has already advanced"); - } - - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.get_holder_pubkeys(); - - msgs::OpenChannel { - chain_hash, - temporary_channel_id: self.context.channel_id, - funding_satoshis: self.context.channel_value_satoshis, - push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - feerate_per_kw: self.context.feerate_per_kw as u32, - to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - channel_flags: if self.context.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: OptionalField::Present(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), - } - } - - pub fn inbound_is_awaiting_accept(&self) -> bool { - self.context.inbound_awaiting_accept - } - - /// Sets this channel to accepting 0conf, must be done before `get_accept_channel` - pub fn set_0conf(&mut self) { - assert!(self.context.inbound_awaiting_accept); - self.context.minimum_depth = Some(0); - } - - /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which - /// should be sent back to the counterparty node. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { - if self.is_outbound() { - panic!("Tried to send accept_channel for an outbound channel?"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { - panic!("Tried to send accept_channel after channel had moved forward"); - } - if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Tried to send an accept_channel for a channel that has already advanced"); - } - if !self.context.inbound_awaiting_accept { - panic!("The inbound channel has already been accepted"); - } - - self.context.user_id = user_id; - self.context.inbound_awaiting_accept = false; - - self.generate_accept_channel_message() - } - - /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an - /// inbound channel. If the intention is to accept an inbound channel, use - /// [`Channel::accept_inbound_channel`] instead. - /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let keys = self.get_holder_pubkeys(); - - msgs::AcceptChannel { - temporary_channel_id: self.context.channel_id, - dust_limit_satoshis: self.context.holder_dust_limit_satoshis, - max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat, - channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis, - htlc_minimum_msat: self.context.holder_htlc_minimum_msat, - minimum_depth: self.context.minimum_depth.unwrap(), - to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, - funding_pubkey: keys.funding_pubkey, - revocation_basepoint: keys.revocation_basepoint, - payment_point: keys.payment_point, - delayed_payment_basepoint: keys.delayed_payment_basepoint, - htlc_basepoint: keys.htlc_basepoint, - first_per_commitment_point, - shutdown_scriptpubkey: OptionalField::Present(match &self.context.shutdown_scriptpubkey { - Some(script) => script.clone().into_inner(), - None => Builder::new().into_script(), - }), - channel_type: Some(self.context.channel_type.clone()), - } - } - - /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an - /// inbound channel without accepting it. 
- /// - /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel - #[cfg(test)] - pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel { - self.generate_accept_channel_message() - } - - /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) - fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { - let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) - } + } else { true } + }, + _ => true + } + }); - /// Updates channel state with knowledge of the funding transaction's txid/index, and generates - /// a funding_created message for the remote peer. - /// Panics if called at some time other than immediately after initial handshake, if called twice, - /// or if called on an inbound channel. - /// Note that channel_id changes during this call! - /// Do NOT broadcast the funding transaction until after a successful funding_signed call! - /// If an Err is returned, it is a ChannelError::Close. - pub fn get_outbound_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result where L::Target: Logger { - if !self.is_outbound() { - panic!("Tried to create outbound funding_created message on an inbound channel!"); - } - if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { - panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)"); - } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); - } + self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); - self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + if let Some(channel_ready) = self.check_get_channel_ready(height) { + let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { + self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) + } else { None }; + log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); + return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); + } - let signature = match self.get_outbound_funding_created_signature(logger) { - Ok(res) => res, - Err(e) => { - log_error!(logger, "Got bad signatures: {:?}!", e); - self.context.channel_transaction_parameters.funding_outpoint = None; - return Err(e); + let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); + if non_shutdown_state >= ChannelState::ChannelReady as u32 || + (non_shutdown_state & ChannelState::OurChannelReady as u32) == 
ChannelState::OurChannelReady as u32 {
+			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+			if self.context.funding_tx_confirmation_height == 0 {
+				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
+				// zero if it has been reorged out; however, in either case, our state flags
+				// indicate we've already sent a channel_ready
+				funding_tx_confirmations = 0;
+			}
-		let temporary_channel_id = self.context.channel_id;
-
-		// Now that we're past error-generating stuff, update our local state:
+			// If we've sent channel_ready (or have both sent and received channel_ready), and
+			// the funding transaction has become unconfirmed,
+			// close the channel and hope we can get the latest state on chain (because presumably
+			// the funding transaction is at least still in the mempool of most nodes).
+			//
+			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+			// 0-conf channel, but not doing so may lead to the
+			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
+			// to.
+			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
+				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
+					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
+				return Err(ClosureReason::ProcessingError { err: err_reason });
+			}
+		} else if !self.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
+				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
+			log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
+			// If funding_tx_confirmed_in is unset, the channel must not be active
+			assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
+			assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+			return Err(ClosureReason::FundingTimedOut);
+		}
-		self.context.channel_state = ChannelState::FundingCreated as u32;
-		self.context.channel_id = funding_txo.to_channel_id();
-		self.context.funding_transaction = Some(funding_transaction);
+		let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
+			self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
+		} else { None };
+		Ok((None, timed_out_htlcs, announcement_sigs))
+	}
-		Ok(msgs::FundingCreated {
-			temporary_channel_id,
-			funding_txid: funding_txo.txid,
-			funding_output_index: funding_txo.index,
-			signature
-		})
+	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
+	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
+	/// before the channel has reached channel_ready and we can just wait for more blocks.
+	pub fn funding_transaction_unconfirmed(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
+		if self.context.funding_tx_confirmation_height != 0 {
+			// We handle the funding disconnection by calling best_block_updated with a height one
+			// below where our funding was connected, implying a reorg back to conf_height - 1.
+			let reorg_height = self.context.funding_tx_confirmation_height - 1;
+			// We use the time field to bump the current time we set on channel updates if it's
+			// larger. If we don't know that time has moved forward, we can just set it to the last
+			// time we saw and it will be ignored.
+ let best_time = self.context.update_time_counter; + match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) { + Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { + assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); + assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); + assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?"); + Ok(()) + }, + Err(e) => Err(e) + } + } else { + // We never learned about the funding confirmation anyway, just ignore + Ok(()) + } } + // Methods to get unprompted messages to send to the remote end (or where we already returned + // something in the handler for the message that prompted this message): + /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly /// announceable and available for use (have exchanged ChannelReady messages in both /// directions). Should be used for both broadcasted announcements and in response to an @@ -5996,11 +6023,6 @@ impl FundedChannel { } } - /// Get forwarding information for the counterparty. - pub fn counterparty_forwarding_info(&self) -> Option { - self.context.counterparty_forwarding_info.clone() - } - pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); @@ -6107,53 +6129,6 @@ impl FundedChannel { Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } - /// Gets the latest commitment transaction and any dependent transactions for relay (forcing - /// shutdown of this channel - no more calls into this Channel may be made afterwards except - /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). - /// Also returns the list of payment_hashes for channels which we can safely fail backwards - /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) { - let context = self.get_context_mut(); - // Note that we MUST only generate a monitor update that indicates force-closure - we're - // called during initialization prior to the chain_monitor in the encompassing ChannelManager - // being fully configured in some cases. Thus, its likely any monitor events we generate will - // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more. - assert!(context.channel_state != ChannelState::ShutdownComplete as u32); - - // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and - // return them to fail the payment. - let mut dropped_outbound_htlcs = Vec::with_capacity(context.holding_cell_htlc_updates.len()); - let counterparty_node_id = context.counterparty_node_id; - for htlc_update in context.holding_cell_htlc_updates.drain(..) { - match htlc_update { - HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. 
} => { - dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, context.channel_id)); - }, - _ => {} - } - } - let monitor_update = if let Some(funding_txo) = context.channel_transaction_parameters.funding_outpoint { - // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent), - // returning a channel monitor update here would imply a channel monitor update before - // we even registered the channel monitor to begin with, which is invalid. - // Thus, if we aren't actually at a point where we could conceivably broadcast the - // funding transaction, don't return a funding txo (which prevents providing the - // monitor update to the user, even if we return one). - // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. - if context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { - context.latest_monitor_update_id += 1; - Some((funding_txo, ChannelMonitorUpdate { - update_id: context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], - })) - } else { None } - } else { None }; - - context.channel_state = ChannelState::ShutdownComplete as u32; - context.update_time_counter += 1; - (monitor_update, dropped_outbound_htlcs) - } - pub fn inflight_htlc_sources(&self) -> impl Iterator { self.context.holding_cell_htlc_updates.iter() .flat_map(|htlc_update| { @@ -6173,6 +6148,90 @@ impl FundedChannel { pub fn take_workaround_lnd_bug_4006(&mut self) -> Option { self.context.workaround_lnd_bug_4006.take() } + + /// Handles a funding_signed message from the remote end. + /// If this call is successful, broadcast the funding transaction (and not before!) + pub fn funding_signed( + &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result, ChannelError> + where + SP::Target: SignerProvider, + L::Target: Logger + { + if !self.is_outbound() { + return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); + } + if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { + return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned())); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + } + + let funding_script = self.get_funding_redeemscript(); + + let counterparty_keys = self.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + + let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = 
self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+		{
+			let trusted_tx = initial_commitment_tx.trust();
+			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+				return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+			}
+		}
+
+		let holder_commitment_tx = HolderCommitmentTransaction::new(
+			initial_commitment_tx,
+			msg.signature,
+			Vec::new(),
+			&self.get_holder_pubkeys().funding_pubkey,
+			self.counterparty_funding_pubkey()
+		);
+
+		self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
+			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+		let funding_redeemscript = self.get_funding_redeemscript();
+		let funding_txo = self.get_funding_txo().unwrap();
+		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
+		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+			shutdown_script, self.get_holder_selected_contest_delay(),
+			&self.context.destination_script, (funding_txo, funding_txo_script),
+			&self.context.channel_transaction_parameters,
+			funding_redeemscript.clone(), self.context.channel_value_satoshis,
+			obscure_factor,
+			holder_commitment_tx, best_block, self.context.counterparty_node_id);
+
+		channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+
+		assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail the update!
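The `obscure_factor` fed into `ChannelMonitor::new` above implements BOLT 3's commitment-number obscuring: commitment numbers are XORed with a per-channel constant before being encoded into the commitment transaction's locktime and input sequence, so a chain observer cannot count how often a channel has been updated. A minimal sketch of the derivation, assuming the `bitcoin::hashes` API and that the channel opener's payment point is hashed first (the `is_outbound()` flag above selects that ordering):

```rust
use bitcoin::hashes::{sha256, Hash, HashEngine};
use bitcoin::secp256k1::PublicKey;

// Sketch only: lower 48 bits of SHA256(opener_payment_point || acceptor_payment_point).
fn obscure_factor(opener_payment_point: &PublicKey, acceptor_payment_point: &PublicKey) -> u64 {
    let mut engine = sha256::Hash::engine();
    engine.input(&opener_payment_point.serialize());
    engine.input(&acceptor_payment_point.serialize());
    let res = sha256::Hash::from_engine(engine).into_inner();
    // Interpret the last six bytes of the hash as a big-endian 48-bit value.
    res[26..].iter().fold(0u64, |acc, &b| (acc << 8) | b as u64)
}
```

Both peers derive the same factor, which lets either side recover the commitment number of a broadcast commitment transaction from its locktime and sequence fields alone.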
+ self.context.channel_state = ChannelState::FundingSent as u32; + self.context.cur_holder_commitment_transaction_number -= 1; + self.context.cur_counterparty_commitment_transaction_number -= 1; + + log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id())); + + let need_channel_ready = self.check_get_channel_ready(0).is_some(); + self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + Ok(channel_monitor) + } } const SERIALIZATION_VERSION: u8 = 3; @@ -6995,7 +7054,7 @@ mod tests { use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; #[cfg(anchors)] use crate::ln::channel::InitFeatures; - use crate::ln::channel::{FundedChannel, ChannelInterface, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator}; + use crate::ln::channel::{FundedChannel, ChannelInterface, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, InboundV1Channel}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7021,6 +7080,8 @@ mod tests { use bitcoin::util::address::WitnessVersion; use crate::prelude::*; +use super::OutboundV1Channel; + struct TestFeeEstimator { fee_est: u32 } @@ -7103,7 +7164,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match FundedChannel::::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { + match OutboundV1Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -7126,7 +7187,7 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = FundedChannel::::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let node_a_chan = OutboundV1Channel::::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. @@ -7145,6 +7206,7 @@ mod tests { let network = Network::Testnet; let keys_provider = test_utils::TestKeysInterface::new(&seed, network); let logger = test_utils::TestLogger::new(); + let best_block = BestBlock::from_network(network); // Go through the flow of opening a channel between two nodes, making sure // they have different dust limits. 
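In the dust-limit test that follows, the funding transaction simply pays the raw 2-of-2 redeemscript, which nothing in the unit test validates. For contrast, a hedged sketch of how a real funding script-pubkey is formed per BOLT 3, assuming the `bitcoin` crate's script builder (the mainline code above wraps the same redeemscript via `to_v0_p2wsh()`):

```rust
use bitcoin::blockdata::opcodes::all::{OP_CHECKMULTISIG, OP_PUSHNUM_2};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::secp256k1::PublicKey;

// Sketch: BOLT 3 funding output, a P2WSH over `2 <key1> <key2> 2 OP_CHECKMULTISIG`
// with the two funding pubkeys ordered lexicographically.
fn funding_spk(a: &PublicKey, b: &PublicKey) -> Script {
    let (first, second) = if a.serialize() <= b.serialize() { (a, b) } else { (b, a) };
    Builder::new()
        .push_opcode(OP_PUSHNUM_2)
        .push_slice(&first.serialize())
        .push_slice(&second.serialize())
        .push_opcode(OP_PUSHNUM_2)
        .push_opcode(OP_CHECKMULTISIG)
        .into_script()
        .to_v0_p2wsh()
}
```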
@@ -7152,13 +7214,13 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); + let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0); @@ -7166,6 +7228,18 @@ mod tests { node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap(); node_a_chan.context.holder_dust_limit_satoshis = 1560; + // Node A --> Node B: funding created + let output_script = node_a_chan.get_funding_redeemscript(); + let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut { + value: 10000000, script_pubkey: output_script.clone(), + }]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|(_, e)| e).unwrap(); + let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|(_, e)| e).unwrap(); + + // Node B --> Node A: funding signed + let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger); + // Put some inbound and outbound HTLCs in A's channel. let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's. 
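The "effective dust limit" the comment above refers to includes the fee of the HTLC's second-stage transaction: an HTLC output is trimmed from the commitment transaction when it cannot cover that fee on top of the dust limit. A hedged sketch using BOLT 3's non-anchor weights; the 15000 sat/kW feerate is assumed from the test's fee estimator, which sits outside this hunk:

```rust
// Non-anchor second-stage HTLC transaction weights per BOLT 3.
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; // spends an offered HTLC
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // spends a received HTLC

// An HTLC below this threshold is "dust" and omitted from the commitment tx.
fn effective_dust_limit_sat(dust_limit_sat: u64, feerate_per_kw: u64, offered: bool) -> u64 {
    let weight = if offered { HTLC_TIMEOUT_TX_WEIGHT } else { HTLC_SUCCESS_TX_WEIGHT };
    dust_limit_sat + weight * feerate_per_kw / 1000
}

fn main() {
    // With the assumed 15000 sat/kW feerate, 11_092 sat sits below A's received-HTLC
    // threshold (1560 + 703 * 15 = 12_105) but above B's (546 + 703 * 15 = 11_091).
    assert!(11_092 < effective_dust_limit_sat(1560, 15_000, false));
    assert!(11_092 > effective_dust_limit_sat(546, 15_000, false));
}
```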
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput { @@ -7221,7 +7295,7 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = FundedChannel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut chan = OutboundV1Channel::::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let commitment_tx_fee_0_htlcs = FundedChannel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.opt_anchors()); let commitment_tx_fee_1_htlc = FundedChannel::::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.opt_anchors()); @@ -7270,12 +7344,12 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); + let mut node_b_chan = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap(); // Node B --> Node A: accept channel let accept_channel_msg = node_b_chan.accept_inbound_channel(0); @@ -7287,8 +7361,8 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap(); - let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|(_, e)| e).unwrap(); + let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|(_, e)| e).unwrap(); // Node B --> Node A: funding signed let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger); @@ -7343,12 +7417,12 @@ mod tests { // Test that `new_outbound` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the 
`channel_value`. - let chan_1 = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); + let chan_1 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_2 = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); + let chan_2 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); @@ -7357,38 +7431,38 @@ mod tests { // Test that `new_from_req` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound - 1 (2%) of the `channel_value`. - let chan_3 = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap(); + let chan_3 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap(); let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000; assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_4 = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap(); + let chan_4 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap(); let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000; assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. 
- let chan_5 = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); + let chan_5 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `new_outbound` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. - let chan_6 = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); + let chan_6 = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_7 = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap(); + let chan_7 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap(); let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000; assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); // Test that `new_from_req` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. 
- let chan_8 = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap(); + let chan_8 = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap(); let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000; assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); } @@ -7428,7 +7502,7 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = FundedChannel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); + let chan = OutboundV1Channel::::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); @@ -7438,7 +7512,7 @@ mod tests { inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32; if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 { - let chan_inbound_node = FundedChannel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap(); + let chan_inbound_node = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap(); let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64); @@ -7446,7 +7520,7 @@ mod tests { assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); } else { // Channel Negotiations failed - let result = FundedChannel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42); + let result = InboundV1Channel::::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, 
inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42); assert!(result.is_err()); } } @@ -7463,7 +7537,7 @@ mod tests { // Create a channel. let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); assert!(node_a_chan.context.counterparty_forwarding_info.is_none()); assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default assert!(node_a_chan.counterparty_forwarding_info().is_none()); @@ -8255,7 +8329,7 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = FundedChannel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, + let node_a_chan = OutboundV1Channel::::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); @@ -8264,7 +8338,7 @@ mod tests { let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); open_channel_msg.channel_type = Some(channel_type_features); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let res = FundedChannel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, + let res = InboundV1Channel::::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42); assert!(res.is_ok()); @@ -8289,7 +8363,7 @@ mod tests { // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both // need to signal it. 
- let channel_a = FundedChannel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, &config, 0, 42 @@ -8300,13 +8374,13 @@ mod tests { expected_channel_type.set_static_remote_key_required(); expected_channel_type.set_anchors_zero_fee_htlc_tx_required(); - let channel_a = FundedChannel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); - let channel_b = FundedChannel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8338,7 +8412,7 @@ mod tests { let raw_init_features = static_remote_key_required | simple_anchors_required; let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec()); - let channel_a = FundedChannel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8349,7 +8423,7 @@ mod tests { // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts // `static_remote_key`, it will fail the channel. - let channel_b = FundedChannel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors, &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8385,7 +8459,7 @@ mod tests { // First, we'll try to open a channel between A and B where A requests a channel type for // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by // B as it's not supported by LDK. - let channel_a = FundedChannel::::new_outbound( + let channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); @@ -8393,7 +8467,7 @@ mod tests { let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); - let res = FundedChannel::::new_from_req( + let res = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &simple_anchors_init, &open_channel_msg, 7, &config, 0, &&logger, 42 @@ -8404,14 +8478,14 @@ mod tests { // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the // original `option_anchors` feature, which should be rejected by A as it's not supported by // LDK. 
- let mut channel_a = FundedChannel::::new_outbound( + let mut channel_a = OutboundV1Channel::::new_outbound( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, 10000000, 100000, 42, &config, 0, 42 ).unwrap(); let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); - let channel_b = FundedChannel::::new_from_req( + let channel_b = InboundV1Channel::::new_from_req( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42 diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4b604937d9c..9eea9a43ae9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -38,7 +38,7 @@ use crate::chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{FundedChannel, ChannelInterface, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; +use crate::ln::channel::{Channel, FundedChannel, ChannelInterface, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; @@ -78,6 +78,8 @@ use core::ops::Deref; // Re-export this for use in the public API. pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure}; +use super::channel::{OutboundV1Channel, InboundV1Channel}; + // We hold various information about HTLC relay in the HTLC objects in Channel itself: // // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should @@ -499,7 +501,7 @@ pub(super) struct PeerState { /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a /// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the /// `channel_id`. - pub(super) channel_by_id: HashMap<[u8; 32], FundedChannel>, + pub(super) channel_by_id: HashMap<[u8; 32], Channel>, /// The latest `InitFeatures` we heard from the peer. latest_features: InitFeatures, /// Messages to send to the peer - pushed to in the same lock that they are generated in (except @@ -1223,7 +1225,7 @@ impl ChannelDetails { self.short_channel_id.or(self.outbound_scid_alias) } - fn from_channel(channel: &FundedChannel, + fn from_channel(channel: &Channel, best_block_height: u32, latest_features: InitFeatures) -> Self { let balance = channel.get_available_balances(); @@ -1264,7 +1266,7 @@ impl ChannelDetails { force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), + is_usable: if let &Channel::Funded(ref channel) = channel { channel.is_live() } else { false }, is_public: channel.should_announce(), inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), @@ -1388,6 +1390,26 @@ macro_rules! update_maps_on_chan_removal { }} } +macro_rules! 
update_maps_on_chan_removal_ref { + ($self: expr, $channel: expr) => {{ + $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id()); + let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = $channel.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias()); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&$channel.outbound_scid_alias()); + }} +} + /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error) macro_rules! convert_chan_err { ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => { @@ -1401,9 +1423,13 @@ macro_rules! convert_chan_err { ChannelError::Close(msg) => { log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg); update_maps_on_chan_removal!($self, $channel); - let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), - shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + if let Channel::Funded(ref mut channel) = $channel { + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, channel.get_user_id(), + channel.force_shutdown(true), $self.get_channel_update_for_broadcast(&channel).ok())) + } else { + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + (None, vec![]), None)) + } }, } } @@ -1414,7 +1440,8 @@ macro_rules! break_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key()); + let channel_id = $entry.key().clone(); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), &channel_id); if drop { $entry.remove_entry(); } @@ -1429,7 +1456,8 @@ macro_rules! try_chan_entry { match $res { Ok(res) => res, Err(e) => { - let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key()); + let channel_id = $entry.key().clone(); + let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), &channel_id); if drop { $entry.remove_entry(); } @@ -1487,18 +1515,18 @@ macro_rules! emit_channel_ready_event { } macro_rules! 
handle_monitor_update_completion { - ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { - let mut updates = $chan.monitor_updating_restored(&$self.logger, + ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $funded_chan: expr) => { { + let mut updates = $funded_chan.monitor_updating_restored(&$self.logger, &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); - let counterparty_node_id = $chan.get_counterparty_node_id(); - let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() { + let counterparty_node_id = $funded_chan.get_counterparty_node_id(); + let channel_update = if updates.channel_ready.is_some() && $funded_chan.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a // channel_update later through the announcement_signatures process for public // channels, but there's no reason not to just inform our counterparty of our fees // now. - if let Ok(msg) = $self.get_channel_update_for_unicast($chan) { + if let Ok(msg) = $self.get_channel_update_for_unicast($funded_chan) { Some(events::MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg, @@ -1507,10 +1535,10 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.channel_id()).unwrap_or(Vec::new()); + .remove(&$funded_chan.channel_id()).unwrap_or(Vec::new()); let htlc_forwards = $self.handle_channel_resumption( - &mut $peer_state.pending_msg_events, $chan, updates.raa, + &mut $peer_state.pending_msg_events, $funded_chan, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs); @@ -1518,7 +1546,7 @@ macro_rules! handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.channel_id(); + let channel_id = $funded_chan.channel_id(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -1536,41 +1564,45 @@ macro_rules! handle_monitor_update_completion { } macro_rules! handle_new_monitor_update { - ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { { + ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $funded_chan: expr, MANUALLY_REMOVING, $remove: expr) => { { // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in // any case so that it won't deadlock. 
debug_assert!($self.id_to_peer.try_lock().is_ok()); match $update_res { ChannelMonitorUpdateStatus::InProgress => { log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.", - log_bytes!($chan.channel_id()[..])); + log_bytes!($funded_chan.channel_id()[..])); Ok(()) }, ChannelMonitorUpdateStatus::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", - log_bytes!($chan.channel_id()[..])); - update_maps_on_chan_removal!($self, $chan); + log_bytes!($funded_chan.channel_id()[..])); + update_maps_on_chan_removal!($self, $funded_chan); let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown( - "ChannelMonitor storage failure".to_owned(), $chan.channel_id(), - $chan.get_user_id(), $chan.force_shutdown(false), - $self.get_channel_update_for_broadcast(&$chan).ok())); + "ChannelMonitor storage failure".to_owned(), $funded_chan.channel_id(), + $funded_chan.get_user_id(), $funded_chan.force_shutdown(false), + $self.get_channel_update_for_broadcast($funded_chan).ok())); $remove; res }, ChannelMonitorUpdateStatus::Completed => { - if ($update_id == 0 || $chan.get_next_monitor_update() + if ($update_id == 0 || $funded_chan.get_next_monitor_update() .expect("We can't be processing a monitor update if it isn't queued") .update_id == $update_id) && - $chan.get_latest_monitor_update_id() == $update_id + $funded_chan.get_latest_monitor_update_id() == $update_id { - handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan); + handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $funded_chan); } Ok(()) }, } } }; ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => { - handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry()) + if let Channel::Funded(chan) = $chan_entry.get_mut() { + handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, chan, MANUALLY_REMOVING, $chan_entry.remove_entry()) + } else { + Ok(()) + } } } @@ -1712,7 +1744,7 @@ where let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - match FundedChannel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, + match OutboundV1Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias) { @@ -1734,7 +1766,7 @@ where panic!("RNG is bad???"); } }, - hash_map::Entry::Vacant(entry) => { entry.insert(channel); } + hash_map::Entry::Vacant(entry) => { entry.insert(Channel::OutboundV1(channel)); } } peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { @@ -1744,7 +1776,7 @@ where Ok(temporary_channel_id) } - fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_channels_with_filter::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec { // Allocate our best estimate of the 
@@ -1712,7 +1744,7 @@ where
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let their_features = &peer_state.latest_features;
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- match FundedChannel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
+ match OutboundV1Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height(), outbound_scid_alias)
{
@@ -1734,7 +1766,7 @@ where
panic!("RNG is bad???");
}
},
- hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+ hash_map::Entry::Vacant(entry) => { entry.insert(Channel::OutboundV1(channel)); }
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
@@ -1744,7 +1776,7 @@ where
Ok(temporary_channel_id)
}
- fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &FundedChannel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+ fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -1784,7 +1816,7 @@ where
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's ok cause that's probably what the user
// really wanted anyway.
- self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+ self.list_channels_with_filter(|&(_, channel)| if let Channel::Funded(channel) = channel { channel.is_live() } else { false })
}
/// Gets the list of channels we have with a given counterparty, in random order.
@@ -1852,7 +1884,7 @@ where
fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = vec![];
let result: Result<(), _> = loop {
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -1863,37 +1895,46 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().get_funding_txo();
- let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
- failed_htlcs = htlcs;
-
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `closing_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg,
- });
+ if let Channel::Funded(channel) = chan_entry.get_mut() {
+ let funding_txo_opt = channel.get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) = channel
+ .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
+ failed_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt.take() {
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
- }
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `closing_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg,
+ });
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
+ // Update the monitor with the shutdown script if necessary.
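The monitor only needs to know the shutdown script before the closing transaction is signed, which is why the message can go out first. A sketch of the ordering constraint the comment describes, with illustrative names:

    // The `shutdown` message commits us to nothing on-chain, so it may be
    // sent while the monitor update is still in flight; the update only has
    // to land before we exchange `closing_signed`.
    #[derive(Debug)]
    enum ClosingStep { SendShutdown, MonitorUpdateCompletes, SendClosingSigned }

    fn cooperative_close_order() -> [ClosingStep; 3] {
        [
            ClosingStep::SendShutdown,           // safe to send immediately
            ClosingStep::MonitorUpdateCompletes, // may lag the message above
            ClosingStep::SendClosingSigned,      // delayed until the update lands
        ]
    }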
+ if let Some(monitor_update) = monitor_update_opt.take() { + let update_id = monitor_update.update_id; + let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update); + break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry); + } + + if channel.is_shutdown() { + if let Channel::Funded(channel) = remove_channel!(self, chan_entry) { + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); + } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); + } else { + unreachable!(); // We checked that the channel was a `FundedChannel` earlier. + } } - self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); + break Ok(()); + } else { + // In this case we do not have a funded channel yet, so just remove it from the map. + remove_channel!(self, chan_entry); + break Ok(()); } - break Ok(()); }, hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) }) } @@ -1983,10 +2024,12 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) { - if let Some(peer_msg) = peer_msg { - self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); - } else { - self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + if let Channel::Funded(chan) = chan.get() { + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan, ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + } else { + self.issue_channel_close_events(chan, ClosureReason::HolderForceClosed); + } } remove_channel!(self, chan) } else { @@ -1994,12 +2037,15 @@ where } }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.force_shutdown(broadcast)); - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + self.finish_force_close_channel(if let Channel::Funded(ref mut chan) = chan { + chan.force_shutdown(broadcast) } else { (None, vec![]) }); + if let Channel::Funded(ref chan) = chan { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } } Ok(chan.get_counterparty_node_id()) @@ -2294,7 +2340,12 @@ where // have no consistency guarantees. break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); }, - Some(chan) => chan + Some(chan) => if let Channel::Funded(chan) = chan { + chan + } else { + // The channel is not yet funded. 
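The onion failure codes used in this forwarding path are BOLT 4 flag-plus-code combinations; the unfunded case about to be failed below uses `channel_disabled`. A standalone sketch of the codes that appear in this function:

    // BOLT 4 onion failure codes combine a flag half with a code number.
    const UPDATE: u16 = 0x1000; // sender may retry after applying our channel_update
    const PERM: u16 = 0x4000;   // permanent; do not retry through us

    const TEMPORARY_CHANNEL_FAILURE: u16 = UPDATE | 7;
    const UNKNOWN_NEXT_PEER: u16 = PERM | 10;
    const CHANNEL_DISABLED: u16 = UPDATE | 20;

    fn main() {
        assert_eq!(UNKNOWN_NEXT_PEER, 0x4000 | 10); // no usable channel at all
        assert_eq!(CHANNEL_DISABLED, 0x1000 | 20);  // channel exists but cannot forward
        assert_eq!(TEMPORARY_CHANNEL_FAILURE, 0x1000 | 7);
    }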
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, None)); + } }; if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we @@ -2492,37 +2543,39 @@ where .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); - } - let funding_txo = chan.get().get_funding_txo().unwrap(); - let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), - htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - payment_id, - payment_secret: payment_secret.clone(), - }, onion_packet, &self.logger); - match break_chan_entry!(self, send_res, chan) { - Some(monitor_update) => { - let update_id = monitor_update.update_id; - let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update); - if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) { - break Err(e); - } - if update_res == ChannelMonitorUpdateStatus::InProgress { - // Note that MonitorUpdateInProgress here indicates (per function - // docs) that we will resend the commitment update once monitor - // updating completes. Therefore, we must return an error - // indicating that it is unsafe to retry the payment wholesale, - // which we do in the send_payment check for - // MonitorUpdateInProgress, below. - return Err(APIError::MonitorUpdateInProgress); - } - }, - None => { }, + if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(id) { + if let Channel::Funded(chan) = chan_entry.get_mut() { + if !chan.is_live() { + return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); + } + let funding_txo = chan.get_funding_txo().unwrap(); + let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), + htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + payment_secret: payment_secret.clone(), + }, onion_packet, &self.logger); + match break_chan_entry!(self, send_res, chan_entry) { + Some(monitor_update) => { + let update_id = monitor_update.update_id; + let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update); + if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry) { + break Err(e); + } + if update_res == ChannelMonitorUpdateStatus::InProgress { + // Note that MonitorUpdateInProgress here indicates (per function + // docs) that we will resend the commitment update once monitor + // updating completes. Therefore, we must return an error + // indicating that it is unsafe to retry the payment wholesale, + // which we do in the send_payment check for + // MonitorUpdateInProgress, below. 
+ return Err(APIError::MonitorUpdateInProgress);
+ }
+ },
+ None => { },
+ }
}
} else {
// The channel was likely removed after we fetched the id from the
@@ -2727,7 +2780,7 @@ where
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&FundedChannel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
) -> Result<(), APIError> {
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -2737,22 +2790,23 @@ where
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (chan, msg) = {
- let (res, chan) = {
- match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(mut chan) => {
+ let channel = peer_state.channel_by_id.remove(temporary_channel_id);
+ let res = {
+ match channel {
+ Some(Channel::OutboundV1(chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
- (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
- .map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
- } else { unreachable!(); })
- , chan)
+ chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+ } else { unreachable!(); })
},
+ Some(_) => { return Err(APIError::APIMisuseError { err: format!("Channel with id {} is not a pending outbound V1 channel", log_bytes!(*temporary_channel_id)) }) },
None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
}
};
- match handle_error!(self, res, chan.get_counterparty_node_id()) {
- Ok(funding_msg) => {
+ match handle_error!(self, res, *counterparty_node_id) {
+ Ok((chan, funding_msg)) => {
(chan, funding_msg)
},
Err(_) => { return Err(APIError::ChannelUnavailable {
@@ -2774,7 +2828,7 @@ where
if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
- e.insert(chan);
+ e.insert(Channel::Funded(chan));
}
}
Ok(())
@@ -2916,13 +2970,15 @@ where
if !channel.update_config(config) {
continue;
}
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
+ if let Channel::Funded(channel) = channel {
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id:
channel.get_counterparty_node_id(), + msg, + }); + } } } Ok(()) @@ -3162,61 +3218,65 @@ where continue; }, hash_map::Entry::Occupied(mut chan) => { - for forward_info in pending_forwards.drain(..) { - match forward_info { - HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, - forward_info: PendingHTLCInfo { - incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, - }, - }) => { - log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. - phantom_shared_secret: None, - }); - if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat, - payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, &self.logger) - { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); - } else { - panic!("Stated return value requirements in send_htlc() were not met"); + if let Channel::Funded(chan) = chan.get_mut() { + for forward_info in pending_forwards.drain(..) { + match forward_info { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, + forward_info: PendingHTLCInfo { + incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, + routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _, + }, + }) => { + log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. 
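Why the forward path can hard-code `phantom_shared_secret: None`, as the comment above notes: phantom payments terminate at a fake last hop we control, so they only ever surface as `Receive` routing, never as a `Forward`. A sketch with illustrative types:

    enum PendingHTLCRouting {
        Forward,
        Receive { phantom_shared_secret: Option<[u8; 32]> },
    }

    fn phantom_secret(routing: &PendingHTLCRouting) -> Option<[u8; 32]> {
        match routing {
            // A forwarded HTLC can never be a phantom receive.
            PendingHTLCRouting::Forward => None,
            PendingHTLCRouting::Receive { phantom_shared_secret } => *phantom_shared_secret,
        }
    }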
+ phantom_shared_secret: None, + }); + if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet, &self.logger) + { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan); + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::reason(failure_code, data), + HTLCDestination::NextHopChannel { node_id: Some(chan.get_counterparty_node_id()), channel_id: forward_chan_id } + )); + continue; } - let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::reason(failure_code, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } - )); - continue; - } - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - if let Err(e) = chan.get_mut().queue_fail_htlc( - htlc_id, err_packet, &self.logger - ) { - if let ChannelError::Ignore(msg) = e { - log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in queue_fail_htlc() were not met"); + }, + HTLCForwardInfo::AddHTLC { .. } => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); + if let Err(e) = chan.queue_fail_htlc( + htlc_id, err_packet, &self.logger + ) { + if let ChannelError::Ignore(msg) = e { + log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); + } else { + panic!("Stated return value requirements in queue_fail_htlc() were not met"); + } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; - } - }, + }, + } } + } else { + log_trace!(self.logger, "Channel with id {} is not yet funded and hence not yet established. 
Impossible to forward.", log_bytes!(chan.get().channel_id())); } } } @@ -3565,44 +3625,51 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; let counterparty_node_id = *counterparty_node_id; peer_state.channel_by_id.retain(|chan_id, chan| { - let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); - if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } - - if let Err(e) = chan.timer_check_closing_negotiation_progress() { - let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id); - handle_errors.push((Err(err), counterparty_node_id)); - if needs_close { return false; } - } + if let Channel::Funded(chan) = chan { + let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + + if let Err(ChannelError::Close(msg)) = chan.timer_check_closing_negotiation_progress() { + log_error!(self.logger, "Closing channel {} due to close-required error: {}", log_bytes!(chan_id[..]), msg); + update_maps_on_chan_removal_ref!(self, chan); + let err = MsgHandleErrInternal::from_finish_shutdown(msg, *chan_id, chan.get_user_id(), + chan.force_shutdown(true), self.get_channel_update_for_broadcast(&chan).ok()); + handle_errors.push((Err(err), counterparty_node_id)); + return false; + } - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if 
let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } - chan.maybe_expire_prev_config(); + chan.maybe_expire_prev_config(); - true + true + } else { + true + } }); if peer_state.ok_to_remove(true) { pending_peers_awaiting_removal.push(counterparty_node_id); @@ -3792,7 +3859,11 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + if let Channel::Funded(chan) = chan_entry.get() { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, chan) + } else { + (0x4000|10, Vec::new()) + } }, hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) } @@ -4039,26 +4110,28 @@ where if peer_state_opt.is_some() { let mut peer_state_lock = peer_state_opt.unwrap(); let peer_state = &mut *peer_state_lock; - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { - let counterparty_node_id = chan.get().get_counterparty_node_id(); - let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger); - - if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { - if let Some(action) = completion_action(Some(htlc_value_msat)) { - log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", - log_bytes!(chan_id), action); - peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); - } - let update_id = monitor_update.update_id; - let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update); - let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, - peer_state, per_peer_state, chan); - if let Err(e) = res { - // TODO: This is a *critical* error - we probably updated the outbound edge - // of the HTLC's monitor with a preimage. We should retry this monitor - // update over and over again until morale improves. - log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage); - return Err((counterparty_node_id, e)); + if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(chan_id) { + if let Channel::Funded(chan) = chan_entry.get_mut() { + let counterparty_node_id = chan.get_counterparty_node_id(); + let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger); + + if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res { + if let Some(action) = completion_action(Some(htlc_value_msat)) { + log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}", + log_bytes!(chan_id), action); + peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); + } + let update_id = monitor_update.update_id; + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update); + let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, + peer_state, per_peer_state, chan_entry); + if let Err(e) = res { + // TODO: This is a *critical* error - we probably updated the outbound edge + // of the HTLC's monitor with a preimage. 
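The situation this TODO describes is the one hard-to-recover failure in the claim path. A sketch of the invariant, with illustrative names:

    // Once the preimage has been handed to the downstream edge, the upstream
    // monitor update stops being best-effort: giving up here could forget a
    // claimable HTLC across restarts.
    fn upstream_update_is_critical(preimage_released_downstream: bool, upstream_update_persisted: bool) -> bool {
        preimage_released_downstream && !upstream_update_persisted
    }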
We should retry this monitor + // update over and over again until morale improves. + log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage); + return Err((counterparty_node_id, e)); + } } } return Ok(()); @@ -4248,12 +4321,14 @@ where hash_map::Entry::Vacant(_) => return, } }; - log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", - highest_applied_update_id, channel.get().get_latest_monitor_update_id()); - if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { - return; + if let Channel::Funded(channel) = channel.get_mut() { + log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", + highest_applied_update_id, channel.get_latest_monitor_update_id()); + if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id { + return; + } + handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel); } - handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); } /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`]. @@ -4309,43 +4384,45 @@ where let peer_state = &mut *peer_state_lock; let is_only_peer_channel = peer_state.channel_by_id.len() == 1; match peer_state.channel_by_id.entry(temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut channel) => { - if !channel.get().inbound_is_awaiting_accept() { - return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); - } - if accept_0conf { - channel.get_mut().set_0conf(); - } else if channel.get().get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } - } - }; - peer_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel); - return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); - } else { - // If this peer already has some channels, a new channel won't increase our number of peers - // with unfunded channels, so as long as we aren't over the maximum number of unfunded - // channels per-peer we can accept channels from a peer with existing ones. 
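The admission rule stated in the comment above, standalone. The constant value is illustrative (LDK defines its own `MAX_UNFUNDED_CHANNEL_PEERS`); the point is that only peers with no funded channels count toward the cap:

    const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;

    fn may_accept_new_channel(is_only_peer_channel: bool, peers_without_funded_channels: usize) -> bool {
        // A peer that already has channels with us cannot increase the number
        // of peers holding only unfunded channels, so it is always admitted.
        !(is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS)
    }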
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + hash_map::Entry::Occupied(mut channel_entry) => { + if let Channel::InboundV1(channel) = channel_entry.get_mut() { + if !channel.inbound_is_awaiting_accept() { + return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); + } + if accept_0conf { + channel.set_0conf(); + } else if channel.get_channel_type().requires_zero_conf() { let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().get_counterparty_node_id(), + node_id: channel.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } }; peer_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel); - return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() }); + let _ = remove_channel!(self, channel_entry); + return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + } else { + // If this peer already has some channels, a new channel won't increase our number of peers + // with unfunded channels, so as long as we aren't over the maximum number of unfunded + // channels per-peer we can accept channels from a peer with existing ones. + if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } + } + }; + peer_state.pending_msg_events.push(send_msg_err_event); + let _ = remove_channel!(self, channel_entry); + return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() }); + } } - } - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().get_counterparty_node_id(), - msg: channel.get_mut().accept_inbound_channel(user_channel_id), - }); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: channel.get_counterparty_node_id(), + msg: channel.accept_inbound_channel(user_channel_id), + }); + } } hash_map::Entry::Vacant(_) => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }); @@ -4438,7 +4515,7 @@ where msg.temporary_channel_id.clone())); } - let mut channel = match FundedChannel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + let mut channel = match InboundV1Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias) { @@ -4475,7 +4552,7 @@ where ); } - entry.insert(channel); + 
entry.insert(Channel::InboundV1(channel)); } } Ok(()) @@ -4492,9 +4569,16 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan); - (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::OutboundV1(chan) = chan_entry.get_mut() { + try_chan_entry!(self, chan.accept_channel(&msg, + &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan_entry); + (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id()) + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close( + format!("Channel with id {} is not a pending outbound channel", + log_bytes!(msg.temporary_channel_id)), msg.temporary_channel_id)); + } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) } @@ -4522,10 +4606,40 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let ((funding_msg, monitor), chan) = + let (funded_chan, funding_msg, monitor) = match peer_state.channel_by_id.entry(msg.temporary_channel_id) { - hash_map::Entry::Occupied(mut chan) => { - (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove()) + hash_map::Entry::Occupied(chan_entry) => { + let (channel_id, chan) = chan_entry.remove_entry(); + match chan { + Channel::InboundV1(chan) => { + match chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) { + Ok(res) => { + res + }, + Err((chan, e)) => { + let mut chan = Channel::InboundV1(chan); + let (_, res) = convert_chan_err!(self, e, chan, &channel_id); + return Err(res); + } + } + } + Channel::OutboundV1(chan) => { + let err_msg = "Received funding_created for an outbound channel?".to_owned(); + log_error!(self.logger, "Closing channel {} due to close-required error: {}", log_bytes!(channel_id[..]), err_msg); + update_maps_on_chan_removal_ref!(self, chan); + let err = MsgHandleErrInternal::from_finish_shutdown(err_msg, channel_id, chan.get_user_id(), + (None, vec![]), None); + return Err(err); + }, + Channel::Funded(mut chan) => { + let err_msg = "Received funding_created for an already funded channel?".to_owned(); + log_error!(self.logger, "Closing channel {} due to close-required error: {}", log_bytes!(channel_id[..]), err_msg); + update_maps_on_chan_removal_ref!(self, chan); + let err = MsgHandleErrInternal::from_finish_shutdown(err_msg, channel_id, chan.get_user_id(), + chan.force_shutdown(true), self.get_channel_update_for_broadcast(&chan).ok()); + return Err(err); + }, + } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) }; @@ -4535,14 +4649,14 @@ where Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(e) => { - match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) { + match self.id_to_peer.lock().unwrap().entry(funded_chan.channel_id()) { hash_map::Entry::Occupied(_) => { return Err(MsgHandleErrInternal::send_err_msg_no_close( "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), funding_msg.channel_id)) }, hash_map::Entry::Vacant(i_e) => { - i_e.insert(chan.get_counterparty_node_id()); + i_e.insert(funded_chan.get_counterparty_node_id()); } } @@ -4558,21 +4672,25 @@ where let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); - let chan = e.insert(chan); - let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, - per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) }); - - // Note that we reply with the new channel_id in error messages if we gave up on the - // channel, not the temporary_channel_id. This is compatible with ourselves, but the - // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for - // any messages referencing a previously-closed channel anyway. - // We do not propagate the monitor update to the user as it would be for a monitor - // that we didn't manage to store (and that we don't care about - we don't respond - // with the funding_signed so the channel can never go on chain). - if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res { - res.0 = None; + let chan = e.insert(Channel::Funded(funded_chan)); + if let Channel::Funded(funded_chan) = chan { + let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, + per_peer_state, funded_chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) }); + + // Note that we reply with the new channel_id in error messages if we gave up on the + // channel, not the temporary_channel_id. This is compatible with ourselves, but the + // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for + // any messages referencing a previously-closed channel anyway. + // We do not propagate the monitor update to the user as it would be for a monitor + // that we didn't manage to store (and that we don't care about - we don't respond + // with the funding_signed so the channel can never go on chain). + if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res { + res.0 = None; + } + res + } else { + Ok(()) } - res } } } @@ -4589,20 +4707,30 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - let monitor = try_chan_entry!(self, - chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan); - let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor); - let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. 
}) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ match chan_entry.get_mut() {
+ Channel::Funded(chan) => {
+ let monitor = try_chan_entry!(self,
+ chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_entry);
+ let update_res = self.chain_monitor.watch_channel(chan.get_funding_txo().unwrap(), monitor);
+ let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
+ }
+ res
+ },
+ Channel::InboundV1(_) => {
+ try_chan_entry!(self, Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())), chan_entry)
+ },
+ Channel::OutboundV1(_) => {
+ try_chan_entry!(self, Err(ChannelError::Close("Received funding_signed before funding_created was sent.".to_owned())), chan_entry)
+ },
}
- res
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
@@ -4618,34 +4746,38 @@ where
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
- if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: counterparty_node_id.clone(),
- msg: announcement_sigs,
- });
- } else if chan.get().is_usable() {
- // If we're sending an announcement_signatures, we'll send the (public)
- // channel_update after sending a channel_announcement when we receive our
- // counterparty's announcement_signatures. Thus, we only bother to send a
- // channel_update here if the channel is not public, i.e. we're not sending an
- // announcement_signatures.
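The dispatch that comment describes, standalone: a public channel's first `channel_update` rides the announcement flow, so only private channels need it delivered directly. A sketch:

    fn initial_update_destination(is_public: bool) -> &'static str {
        if is_public {
            // Sent after channel_announcement, once both sides have
            // exchanged announcement_signatures.
            "broadcast via gossip"
        } else {
            // The counterparty is the only node that can route to us anyway.
            "unicast to the counterparty"
        }
    }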
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id())); - if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::Funded(chan) = chan_entry.get_mut() { + let announcement_sigs_opt = try_chan_entry!(self, chan.channel_ready(&msg, &self.node_signer, + self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_entry); + if let Some(announcement_sigs) = announcement_sigs_opt { + log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.channel_id())); + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), - msg, + msg: announcement_sigs, }); + } else if chan.is_usable() { + // If we're sending an announcement_signatures, we'll send the (public) + // channel_update after sending a channel_announcement when we receive our + // counterparty's announcement_signatures. Thus, we only bother to send a + // channel_update here if the channel is not public, i.e. we're not sending an + // announcement_signatures. + log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.channel_id())); + if let Ok(msg) = self.get_channel_update_for_unicast(chan) { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: counterparty_node_id.clone(), + msg, + }); + } } - } - emit_channel_ready_event!(self, chan.get_mut()); + emit_channel_ready_event!(self, chan); - Ok(()) - }, + Ok(()) + } else { + try_chan_entry!(self, Err(ChannelError::Close(format!("Channel with id {} not yet funded", log_bytes!(chan_entry.get().channel_id())))), chan_entry) + } + } hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) }
}
@@ -4663,35 +4795,38 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
+ if let Channel::Funded(chan) = chan_entry.get_mut() {
+ if !chan.received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- log_bytes!(msg.channel_id),
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
-
- let funding_txo_opt = chan_entry.get().get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
- chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
+ let funding_txo_opt = chan.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+ chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ dropped_htlcs = htlcs;
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `closing_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `closing_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
- break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+ }
+ break Ok(());
+ } else {
+ try_chan_entry!(self, Err(ChannelError::Close("Channel not in a funded state".to_owned())), chan_entry)
}
- break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node!
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) }
@@ -4717,21 +4852,25 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
- if let Some(msg) = closing_signed {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
+ if let Channel::Funded(chan) = chan_entry.get_mut() {
+ let (closing_signed, tx) = try_chan_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_entry);
+ if let Some(msg) = closing_signed {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
+ if tx.is_some() {
+ // We're done with this channel, we've got a signed closing transaction and
+ // will send the closing_signed back to the remote peer upon return. This
+ // also implies there are no pending HTLCs left on the channel, so we can
+ // fully delete it from tracking (the channel monitor is still around to
+ // watch for old state broadcasts)!
+ (tx, Some(remove_channel!(self, chan_entry)))
+ } else { (tx, None) }
+ } else {
+ try_chan_entry!(self, Err(ChannelError::Close("Channel not in funded state".to_owned())), chan_entry)
}
- if tx.is_some() {
- // We're done with this channel, we've got a signed closing transaction and
- // will send the closing_signed back to the remote peer upon return. This
- // also implies there are no pending HTLCs left on the channel, so we can
- // fully delete it from tracking (the channel monitor is still around to
- // watch for old state broadcasts)!
- (tx, Some(remove_channel!(self, chan_entry)))
- } else { (tx, None) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
@@ -4740,7 +4879,7 @@ where
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
}
- if let Some(chan) = chan_option {
+ if let Some(Channel::Funded(chan)) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
@@ -4773,31 +4912,34 @@ where
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
-
- let create_pending_htlc_status = |chan: &FundedChannel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // If the update_add is completely bogus, the call will Err and we will close,
- // but if we've sent a shutdown and they haven't acknowledged it yet, we just
- // want to reject the new HTLC and fail it backwards instead of forwarding.
- match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if (error_code & 0x1000) != 0 {
- let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
- HTLCFailReason::reason(real_code, error_data)
- } else {
- HTLCFailReason::from_failure_code(error_code)
- }.get_encrypted_failure_packet(incoming_shared_secret, &None);
- let msg = msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason
- };
- PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
- },
- _ => pending_forward_info
- }
- };
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ if let Channel::Funded(chan) = chan_entry.get_mut() {
+ let create_pending_htlc_status = |chan: &FundedChannel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ match pending_forward_info {
+ PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+ let reason = if (error_code & 0x1000) != 0 {
+ let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+ HTLCFailReason::reason(real_code, error_data)
+ } else {
+ HTLCFailReason::from_failure_code(error_code)
+ }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+ let msg = msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason
+ };
+ PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+ },
+ _ => pending_forward_info
+ }
+ };
+ try_chan_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan_entry);
+ } else {
+ try_chan_entry!(self, Err(ChannelError::Close(format!("Channel with id {} not yet funded", log_bytes!(chan_entry.get().channel_id())))), chan_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
@@ -4815,8 +4957,12 @@ where
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ if let Channel::Funded(chan) = chan_entry.get_mut() {
+ try_chan_entry!(self, chan.update_fulfill_htlc(&msg), chan_entry)
+ } else {
+ try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry)
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node!
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -4835,8 +4981,12 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan); + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::Funded(chan) = chan_entry.get_mut() { + try_chan_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_entry); + } else { + try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry) + } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -4853,13 +5003,17 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - if (msg.failure_code & 0x8000) == 0 { - let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); - try_chan_entry!(self, Err(chan_err), chan); + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::Funded(chan) = chan_entry.get_mut() { + if (msg.failure_code & 0x8000) == 0 { + let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned()); + try_chan_entry!(self, Err(chan_err), chan_entry); + } + try_chan_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_entry); + Ok(()) + } else { + try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry) } - try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan); - Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -4875,13 +5029,17 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); - let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan); - let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); - let update_id = monitor_update.update_id; - handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, - peer_state, per_peer_state, chan) + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::Funded(chan) = chan_entry.get_mut() { + let funding_txo = chan.get_funding_txo(); + let monitor_update = try_chan_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_entry); + let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); + let update_id = monitor_update.update_id; + handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, + peer_state, per_peer_state, chan_entry) + } else { + try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry) + } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } @@ -4994,14 +5152,18 @@ where }).map(|mtx| mtx.lock().unwrap())?; let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().get_funding_txo(); - let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); - let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); - let update_id = monitor_update.update_id; - let res = handle_new_monitor_update!(self, update_res, update_id, - peer_state_lock, peer_state, per_peer_state, chan); - (htlcs_to_fail, res) + hash_map::Entry::Occupied(mut chan_entry) => { + if let Channel::Funded(chan) = chan_entry.get_mut() { + let funding_txo = chan.get_funding_txo(); + let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.revoke_and_ack(&msg, &self.logger), chan_entry); + let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update); + let update_id = monitor_update.update_id; + let res = handle_new_monitor_update!(self, update_res, update_id, + peer_state_lock, peer_state, per_peer_state, chan_entry); + (htlcs_to_fail, res) + } else { + try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry) + } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
@@ -5020,8 +5182,12 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+			hash_map::Entry::Occupied(mut chan_entry) => {
+				if let Channel::Funded(chan) = chan_entry.get_mut() {
+					try_chan_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_entry);
+				} else {
+					try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry);
+				}
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
@@ -5038,20 +5204,24 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				if !chan.get().is_usable() {
-					return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
-				}
+			hash_map::Entry::Occupied(mut chan_entry) => {
+				if let Channel::Funded(chan) = chan_entry.get_mut() {
+					if !chan.is_usable() {
+						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+					}
 
-				peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-					msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
-						&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
-						msg, &self.default_configuration
-					), chan),
-					// Note that announcement_signatures fails if the channel cannot be announced,
-					// so get_channel_update_for_broadcast will never fail by the time we get here.
-					update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
-				});
+					peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+						msg: try_chan_entry!(self, chan.announcement_signatures(
+							&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+							msg, &self.default_configuration
+						), chan_entry),
+						// Note that announcement_signatures fails if the channel cannot be announced,
+						// so get_channel_update_for_broadcast will never fail by the time we get here.
+						update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
+					});
+				} else {
+					try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry)
+				}
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
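// announcement_signatures above is ignored (IgnoreError, not a close) when
// the channel isn't usable yet, and on success the announcement is broadcast
// together with a fresh channel_update. A small sketch of that gate, using
// invented placeholder types:
struct Announcement;
struct ChannelUpdateMsg;

fn maybe_announce(
	is_usable: bool,
	build_announcement: impl FnOnce() -> Announcement,
	build_update: impl FnOnce() -> ChannelUpdateMsg,
) -> Option<(Announcement, ChannelUpdateMsg)> {
	if !is_usable {
		// Mirrors the IgnoreError path above: too early, but not a protocol
		// violation worth closing the channel over.
		return None;
	}
	// Broadcasting the update alongside the announcement lets gossip
	// consumers learn the channel's parameters as soon as they learn it exists.
	Some((build_announcement(), build_update()))
}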
@@ -5075,23 +5245,27 @@ where
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(chan_id) {
-			hash_map::Entry::Occupied(mut chan) => {
-				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-					if chan.get().should_announce() {
-						// If the announcement is about a channel of ours which is public, some
-						// other peer may simply be forwarding all its gossip to us. Don't provide
-						// a scary-looking error message and return Ok instead.
+			hash_map::Entry::Occupied(mut chan_entry) => {
+				if let Channel::Funded(chan) = chan_entry.get_mut() {
+					if chan.get_counterparty_node_id() != *counterparty_node_id {
+						if chan.should_announce() {
+							// If the announcement is about a channel of ours which is public, some
+							// other peer may simply be forwarding all its gossip to us. Don't provide
+							// a scary-looking error message and return Ok instead.
+							return Ok(NotifyOption::SkipPersist);
+						}
+						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+					}
+					let were_node_one = self.get_our_node_id().serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+					let msg_from_node_one = msg.contents.flags & 1 == 0;
+					if were_node_one == msg_from_node_one {
 						return Ok(NotifyOption::SkipPersist);
+					} else {
+						log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+						try_chan_entry!(self, chan.channel_update(&msg), chan_entry);
 					}
-					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
-				}
-				let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
-				let msg_from_node_one = msg.contents.flags & 1 == 0;
-				if were_node_one == msg_from_node_one {
-					return Ok(NotifyOption::SkipPersist);
 				} else {
-					log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
-					try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+					try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry);
 				}
 			},
 			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
 		}
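// The channel_update hunk above decides whether a received update describes
// our own side of the channel by comparing serialized node ids: the node with
// the lexicographically smaller pubkey is node_one, and bit 0 of the message
// flags names the sending side. A compact sketch of exactly that check:
fn update_is_about_our_side(our_node_id: &[u8; 33], their_node_id: &[u8; 33], flags: u8) -> bool {
	let were_node_one = our_node_id[..] < their_node_id[..];
	let msg_from_node_one = flags & 1 == 0;
	// When the directions match, the update is our own and can be skipped
	// rather than applied to the channel.
	were_node_one == msg_from_node_one
}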
@@ -5100,7 +5274,7 @@ where
 	}
 
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-		let htlc_forwards;
+		let mut htlc_forwards = None;
 		let need_lnd_workaround = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5112,39 +5286,43 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					// Currently, we expect all holding cell update_adds to be dropped on peer
-					// disconnect, so Channel's reestablish will never hand us any holding cell
-					// freed HTLCs to fail backwards. If in the future we no longer drop pending
-					// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-					let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
-						msg, &self.logger, &self.node_signer, self.genesis_hash,
-						&self.default_configuration, &*self.best_block.read().unwrap()), chan);
-					let mut channel_update = None;
-					if let Some(msg) = responses.shutdown_msg {
-						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-							node_id: counterparty_node_id.clone(),
-							msg,
-						});
-					} else if chan.get().is_usable() {
-						// If the channel is in a usable state (ie the channel is not being shut
-						// down), send a unicast channel_update to our counterparty to make sure
-						// they have the latest channel parameters.
-						if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
-							channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-								node_id: chan.get().get_counterparty_node_id(),
+				hash_map::Entry::Occupied(mut chan_entry) => {
+					if let Channel::Funded(chan) = chan_entry.get_mut() {
+						// Currently, we expect all holding cell update_adds to be dropped on peer
+						// disconnect, so Channel's reestablish will never hand us any holding cell
+						// freed HTLCs to fail backwards. If in the future we no longer drop pending
+						// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+						let responses = try_chan_entry!(self, chan.channel_reestablish(
+							msg, &self.logger, &self.node_signer, self.genesis_hash,
+							&self.default_configuration, &*self.best_block.read().unwrap()), chan_entry);
+						let mut channel_update = None;
+						if let Some(msg) = responses.shutdown_msg {
+							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+								node_id: counterparty_node_id.clone(),
 								msg,
 							});
+						} else if chan.is_usable() {
+							// If the channel is in a usable state (ie the channel is not being shut
+							// down), send a unicast channel_update to our counterparty to make sure
+							// they have the latest channel parameters.
+							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+									node_id: chan.get_counterparty_node_id(),
+									msg,
+								});
+							}
 						}
+						let need_lnd_workaround = chan.take_workaround_lnd_bug_4006();
+						htlc_forwards = self.handle_channel_resumption(
+							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
+							Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+						if let Some(upd) = channel_update {
+							peer_state.pending_msg_events.push(upd);
+						}
+						need_lnd_workaround
+					} else {
+						try_chan_entry!(self, Err(ChannelError::Close("Channel not yet funded".into())), chan_entry)
 					}
-					let need_lnd_workaround = chan.get_mut().take_workaround_lnd_bug_4006();
-					htlc_forwards = self.handle_channel_resumption(
-						&mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
-						Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
-					if let Some(upd) = channel_update {
-						peer_state.pending_msg_events.push(upd);
-					}
-					need_lnd_workaround
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
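// On reestablish, the handler above either relays our pending shutdown (if
// the channel is closing) or re-sends a unicast channel_update so the peer
// holds current parameters. A sketch of that either/or decision, with
// placeholder message types standing in for the real ones:
enum FollowUp { Shutdown(ShutdownMsg), Update(ChannelUpdate), Nothing }
struct ShutdownMsg;
struct ChannelUpdate;

fn reestablish_follow_up(
	shutdown_msg: Option<ShutdownMsg>,
	is_usable: bool,
	build_update: impl FnOnce() -> Option<ChannelUpdate>,
) -> FollowUp {
	if let Some(msg) = shutdown_msg {
		// Mid-shutdown: keep driving the cooperative close forward.
		FollowUp::Shutdown(msg)
	} else if is_usable {
		// Live channel: refresh the counterparty's view of our parameters.
		build_update().map_or(FollowUp::Nothing, FollowUp::Update)
	} else {
		FollowUp::Nothing
	}
}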
@@ -5199,25 +5377,27 @@ where
 				let peer_state = &mut *peer_state_lock;
 				let pending_msg_events = &mut peer_state.pending_msg_events;
 				if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
-					let mut chan = remove_channel!(self, chan_entry);
-					failed_channels.push(chan.force_shutdown(false));
-					if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-							msg: update
+					let chan = remove_channel!(self, chan_entry);
+					if let Channel::Funded(mut chan) = chan {
+						failed_channels.push(chan.force_shutdown(false));
+						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+								msg: update
+							});
+						}
+						let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+							ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+						} else {
+							ClosureReason::CommitmentTxConfirmed
+						};
+						self.issue_channel_close_events(&chan, reason);
+						pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: chan.get_counterparty_node_id(),
+							action: msgs::ErrorAction::SendErrorMessage {
+								msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+							},
 						});
 					}
-					let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-						ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-					} else {
-						ClosureReason::CommitmentTxConfirmed
-					};
-					self.issue_channel_close_events(&chan, reason);
-					pending_msg_events.push(events::MessageSendEvent::HandleError {
-						node_id: chan.get_counterparty_node_id(),
-						action: msgs::ErrorAction::SendErrorMessage {
-							msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-						},
-					});
 				}
 			}
 		}
@@ -5269,27 +5449,29 @@ where
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
 				for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-					let counterparty_node_id = chan.get_counterparty_node_id();
-					let funding_txo = chan.get_funding_txo();
-					let (monitor_opt, holding_cell_failed_htlcs) =
-						chan.maybe_free_holding_cell_htlcs(&self.logger);
-					if !holding_cell_failed_htlcs.is_empty() {
-						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
-					}
-					if let Some(monitor_update) = monitor_opt {
-						has_monitor_update = true;
-
-						let update_res = self.chain_monitor.update_channel(
-							funding_txo.expect("channel is live"), monitor_update);
-						let update_id = monitor_update.update_id;
-						let channel_id: [u8; 32] = *channel_id;
-						let res = handle_new_monitor_update!(self, update_res, update_id,
-							peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
-							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
+					if let Channel::Funded(chan) = chan {
+						let counterparty_node_id = chan.get_counterparty_node_id();
+						let funding_txo = chan.get_funding_txo();
+						let (monitor_opt, holding_cell_failed_htlcs) =
+							chan.maybe_free_holding_cell_htlcs(&self.logger);
+						if !holding_cell_failed_htlcs.is_empty() {
+							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
 						}
-						continue 'peer_loop;
+						if let Some(monitor_update) = monitor_opt {
+							has_monitor_update = true;
+
+							let update_res = self.chain_monitor.update_channel(
+								funding_txo.expect("channel is live"), monitor_update);
+							let update_id = monitor_update.update_id;
+							let channel_id: [u8; 32] = *channel_id;
+							let res = handle_new_monitor_update!(self, update_res, update_id,
+								peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+								peer_state.channel_by_id.remove(&channel_id));
+							if res.is_err() {
+								handle_errors.push((counterparty_node_id, res));
+							}
+							continue 'peer_loop;
+						} else { /* Unfunded channel ignored */ }
 					}
 				}
 				break 'chan_loop;
@@ -5324,37 +5506,50 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.retain(|channel_id, chan| {
-				match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-					Ok((msg_opt, tx_opt)) => {
-						if let Some(msg) = msg_opt {
-							has_update = true;
-							pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-								node_id: chan.get_counterparty_node_id(), msg,
-							});
-						}
-						if let Some(tx) = tx_opt {
-							// We're done with this channel. We got a closing_signed and sent back
-							// a closing_signed with a closing transaction to broadcast.
-							if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-									msg: update
+				if let Channel::Funded(chan) = chan {
+					match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+						Ok((msg_opt, tx_opt)) => {
+							if let Some(msg) = msg_opt {
+								has_update = true;
+								pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+									node_id: chan.get_counterparty_node_id(), msg,
 								});
 							}
+							if let Some(tx) = tx_opt {
+								// We're done with this channel. We got a closing_signed and sent back
+								// a closing_signed with a closing transaction to broadcast.
+								if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+									pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+										msg: update
+									});
+								}
 
-							self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+								self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
-							log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-							self.tx_broadcaster.broadcast_transaction(&tx);
-							update_maps_on_chan_removal!(self, chan);
+								log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+								self.tx_broadcaster.broadcast_transaction(&tx);
+								update_maps_on_chan_removal!(self, chan);
+								false
+							} else { true }
+						},
+						Err(ChannelError::Close(err_msg)) => {
+							has_update = true;
+							log_error!(self.logger, "Closing channel {} due to close-required error: {}", log_bytes!(channel_id[..]), err_msg);
+							update_maps_on_chan_removal_ref!(self, chan);
+							let err = MsgHandleErrInternal::from_finish_shutdown(err_msg, *channel_id, chan.get_user_id(),
+								chan.force_shutdown(true), self.get_channel_update_for_broadcast(&chan).ok());
+							handle_errors.push((chan.get_counterparty_node_id(), Err(err)));
 							false
-						} else { true }
-					},
-					Err(e) => {
-						has_update = true;
-						let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-						handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
-						!close_channel
+						},
+						Err(err) => {
+							let err = MsgHandleErrInternal::from_chan_no_close(err, channel_id.clone());
+							handle_errors.push((chan.get_counterparty_node_id(), Err(err)));
+							true
+						},
 					}
+				} else {
+					// Channel is unfunded. Just retain and continue.
+					true
 				}
 			});
 		}
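// maybe_propose_closing_signed above drives the retain(): a channel stays in
// the map while fee negotiation continues, and is dropped once it yields a
// final closing transaction or a Close-level error. A sketch of that
// retention rule, with stand-in outcomes:
enum CloseStep {
	// Nothing to send yet, or a fee counter-proposal went out.
	Progress,
	// closing_signed completed and the closing tx was broadcast.
	FinalTx,
	// A close-required error: force-shutdown and surface the error.
	FatalError,
}

fn retain_after_closing_step(step: CloseStep) -> bool {
	match step {
		CloseStep::Progress => true,
		CloseStep::FinalTx => false,
		CloseStep::FatalError => false,
	}
}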
@@ -5607,9 +5802,11 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for chan in peer_state.channel_by_id.values() {
-				for (htlc_source, _) in chan.inflight_htlc_sources() {
-					if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
-						inflight_htlcs.process_path(path, self.get_our_node_id());
+				if let Channel::Funded(chan) = chan {
+					for (htlc_source, _) in chan.inflight_htlc_sources() {
+						if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
+							inflight_htlcs.process_path(path, self.get_our_node_id());
+						}
 					}
 				}
 			}
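// The loop above only consults funded channels when building the in-flight
// HTLC picture handed to the router. A toy accumulator over (path, amount)
// pairs, with plain integers standing in for HTLCSource/InFlightHtlcs:
use std::collections::HashMap;

// Each path is a list of SCIDs; sum the msat flowing over every hop pair.
fn build_inflight(paths: &[(Vec<u64>, u64)]) -> HashMap<(u64, u64), u64> {
	let mut used = HashMap::new();
	for (path, amt_msat) in paths {
		for hops in path.windows(2) {
			*used.entry((hops[0], hops[1])).or_insert(0) += amt_msat;
		}
	}
	used
}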
@@ -5945,84 +6142,89 @@ where
 				let peer_state = &mut *peer_state_lock;
 				let pending_msg_events = &mut peer_state.pending_msg_events;
 				peer_state.channel_by_id.retain(|_, channel| {
-					let res = f(channel);
-					if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
-						for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-							let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-							timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-								HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
-						}
-						if let Some(channel_ready) = channel_ready_opt {
-							send_channel_ready!(self, pending_msg_events, channel, channel_ready);
-							if channel.is_usable() {
-								log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
-								if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
-									pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-										node_id: channel.get_counterparty_node_id(),
-										msg,
-									});
+					if let Channel::Funded(channel) = channel {
+						let res = f(channel);
+						if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+							for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
+								let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+								timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+									HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+							}
+							if let Some(channel_ready) = channel_ready_opt {
+								send_channel_ready!(self, pending_msg_events, channel, channel_ready);
+								if channel.is_usable() {
+									log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+									if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+										pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+											node_id: channel.get_counterparty_node_id(),
+											msg,
+										});
+									}
+								} else {
+									log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
 								}
-							} else {
-								log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
 							}
-						}
 
-						emit_channel_ready_event!(self, channel);
+							emit_channel_ready_event!(self, channel);
 
-						if let Some(announcement_sigs) = announcement_sigs {
-							log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
-							pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-								node_id: channel.get_counterparty_node_id(),
-								msg: announcement_sigs,
-							});
-							if let Some(height) = height_opt {
-								if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
-									pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-										msg: announcement,
-										// Note that announcement_signatures fails if the channel cannot be announced,
-										// so get_channel_update_for_broadcast will never fail by the time we get here.
-										update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
-									});
+							if let Some(announcement_sigs) = announcement_sigs {
+								log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+								pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+									node_id: channel.get_counterparty_node_id(),
+									msg: announcement_sigs,
+								});
+								if let Some(height) = height_opt {
+									if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+										pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+											msg: announcement,
+											// Note that announcement_signatures fails if the channel cannot be announced,
+											// so get_channel_update_for_broadcast will never fail by the time we get here.
+											update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+										});
+									}
 								}
 							}
-						}
-						if channel.is_our_channel_ready() {
-							if let Some(real_scid) = channel.get_short_channel_id() {
-								// If we sent a 0conf channel_ready, and now have an SCID, we add it
-								// to the short_to_chan_info map here. Note that we check whether we
-								// can relay using the real SCID at relay-time (i.e.
-								// enforce option_scid_alias then), and if the funding tx is ever
-								// un-confirmed we force-close the channel, ensuring short_to_chan_info
-								// is always consistent.
-								let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-								let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
-								assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
-									"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
-									fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+							if channel.is_our_channel_ready() {
+								if let Some(real_scid) = channel.get_short_channel_id() {
+									// If we sent a 0conf channel_ready, and now have an SCID, we add it
+									// to the short_to_chan_info map here. Note that we check whether we
+									// can relay using the real SCID at relay-time (i.e.
+									// enforce option_scid_alias then), and if the funding tx is ever
+									// un-confirmed we force-close the channel, ensuring short_to_chan_info
+									// is always consistent.
+									let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+									let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
+									assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+										"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+										fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+								}
 							}
-						}
-					} else if let Err(reason) = res {
-						update_maps_on_chan_removal!(self, channel);
-						// It looks like our counterparty went on-chain or funding transaction was
-						// reorged out of the main chain. Close the channel.
-						failed_channels.push(channel.force_shutdown(true));
-						if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-								msg: update
+						} else if let Err(reason) = res {
+							update_maps_on_chan_removal!(self, channel);
+							// It looks like our counterparty went on-chain or funding transaction was
+							// reorged out of the main chain. Close the channel.
+							failed_channels.push(channel.force_shutdown(true));
+							if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
+								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+									msg: update
+								});
+							}
+							let reason_message = format!("{}", reason);
+							self.issue_channel_close_events(channel, reason);
+							pending_msg_events.push(events::MessageSendEvent::HandleError {
+								node_id: channel.get_counterparty_node_id(),
+								action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+									channel_id: channel.channel_id(),
+									data: reason_message,
+								} },
 							});
+							return false;
 						}
-						let reason_message = format!("{}", reason);
-						self.issue_channel_close_events(channel, reason);
-						pending_msg_events.push(events::MessageSendEvent::HandleError {
-							node_id: channel.get_counterparty_node_id(),
-							action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-								channel_id: channel.channel_id(),
-								data: reason_message,
-							} },
-						});
-						return false;
+						true
+					} else {
+						// Our channel is not yet funded.
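// When a 0conf channel later confirms and gains a real SCID, the code above
// inserts it into short_to_chan_info and asserts that any pre-existing entry
// is identical. A reduced sketch of that idempotent insert:
use std::collections::HashMap;

fn record_real_scid(
	map: &mut HashMap<u64, ([u8; 33], [u8; 32])>,
	real_scid: u64, counterparty: [u8; 33], channel_id: [u8; 32],
) {
	let prev = map.insert(real_scid, (counterparty, channel_id));
	// SCIDs are globally unique; a differing prior entry would mean two
	// channels claimed the same short id, which should be impossible.
	assert!(prev.is_none() || prev == Some((counterparty, channel_id)),
		"SCIDs should never collide");
}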
+						true
 					}
-					true
 				});
 			}
 		}
@@ -6270,13 +6472,18 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.retain(|_, chan| {
-				chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-				if chan.is_shutdown() {
-					update_maps_on_chan_removal!(self, chan);
-					self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-					return false;
+				if let Channel::Funded(chan) = chan {
+					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+					if chan.is_shutdown() {
+						update_maps_on_chan_removal!(self, chan);
+						self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+						return false;
+					}
+					true
+				} else {
+					// Channel is not yet funded.
+					true
 				}
-				true
 			});
 			pending_msg_events.retain(|msg| {
 				match msg {
@@ -6374,32 +6581,30 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.retain(|_, chan| {
-				let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
-					if !chan.have_received_message() {
-						// If we created this (outbound) channel while we were disconnected from the
-						// peer we probably failed to send the open_channel message, which is now
-						// lost. We can't have had anything pending related to this channel, so we just
-						// drop it.
-						false
-					} else {
+				if !chan.have_received_message() {
+					// If we created this (outbound) channel while we were disconnected from the
+					// peer we probably failed to send the open_channel message, which is now
+					// lost. We can't have had anything pending related to this channel, so we just
+					// drop it.
+					return false;
+				}
+				if let Channel::Funded(chan) = chan {
+					if chan.get_counterparty_node_id() == *counterparty_node_id {
 						pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 							node_id: chan.get_counterparty_node_id(),
 							msg: chan.get_channel_reestablish(&self.logger),
 						});
-						true
-					}
-				} else { true };
-				if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
-					if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
-						if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-							pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-								node_id: *counterparty_node_id,
-								msg, update_msg,
-							});
+						if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
+							if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
+								pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
+									node_id: *counterparty_node_id,
+									msg, update_msg,
+								});
+							}
 						}
 					}
 				}
-				retain
+				true
 			});
 		}
 		//TODO: Also re-broadcast announcement_signatures
@@ -6430,7 +6635,7 @@ where
 		if peer_state_mutex_opt.is_none() { return; }
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+		if let Some(Channel::OutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 			if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
 				peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 					node_id: *counterparty_node_id,
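// Only an outbound, not-yet-funded channel can react to a peer's error by
// adjusting its open_channel and retrying, which is presumably why the hunk
// above matches Channel::OutboundV1 specifically before calling
// maybe_handle_error_without_close. An illustrative sketch of that gate (the
// retry flag and message name are assumptions, not this crate's fields):
enum PendingChan { OutboundV1 { retried: bool }, Funded }

fn maybe_retry_open(chan: &mut PendingChan) -> Option<&'static str> {
	match chan {
		PendingChan::OutboundV1 { retried } if !*retried => {
			// e.g. drop an optional feature the peer rejected, then re-send
			// open_channel exactly once.
			*retried = true;
			Some("open_channel")
		},
		// Funded channels (or a second failure) get no retry.
		_ => None,
	}
}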
@@ -6953,11 +7158,10 @@ where
 		for (_, peer_state_mutex) in per_peer_state.iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for (_, channel) in peer_state.channel_by_id.iter() {
-				if channel.is_funding_initiated() {
-					channel.write(writer)?;
-				}
-			}
+			for channel in peer_state.channel_by_id.iter().filter_map(|(_, chan)| match chan {
+				Channel::Funded(chan) => Some(chan),
+				_ => None,
+			}) { channel.write(writer)?; }
 		}
 	}
@@ -7271,7 +7475,7 @@ where
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
@@ -7342,11 +7546,11 @@ where
 				match peer_channels.entry(channel.get_counterparty_node_id()) {
 					hash_map::Entry::Occupied(mut entry) => {
 						let by_id_map = entry.get_mut();
-						by_id_map.insert(channel.channel_id(), channel);
+						by_id_map.insert(channel.channel_id(), Channel::Funded(channel));
 					},
 					hash_map::Entry::Vacant(entry) => {
 						let mut by_id_map = HashMap::new();
-						by_id_map.insert(channel.channel_id(), channel);
+						by_id_map.insert(channel.channel_id(), Channel::Funded(channel));
 						entry.insert(by_id_map);
 					}
 				}
@@ -7747,7 +7951,7 @@ where
 			let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+			if let Some(Channel::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
				channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
 			}
 		}
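// Persistence round-trip implied by the last hunks: only funded channels are
// written to disk, and every deserialized channel is re-wrapped as
// Channel::Funded when rebuilding the per-peer maps. A toy version of both
// directions, with a u64 standing in for the serialized channel state:
enum Channel { Funded(u64), OutboundV1(u64) }

fn write_channels(chans: &[Channel], out: &mut Vec<u64>) {
	for c in chans.iter() {
		// Unfunded channels hold no committed state worth persisting.
		if let Channel::Funded(v) = c { out.push(*v); }
	}
}

fn read_channels(vals: &[u64]) -> Vec<Channel> {
	// Everything on disk was funded by construction, so wrap unconditionally.
	vals.iter().map(|v| Channel::Funded(*v)).collect()
}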