Skip to content

Pre-C-Bindings Cleanups #3 #676

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
6 changes: 3 additions & 3 deletions lightning/src/chain/keysinterface.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ use util::ser::{Writeable, Writer, Readable};

use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys};
use ln::msgs;
use ln::msgs::UnsignedChannelAnnouncement;
Comment on lines -36 to +37
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you open an issue to fix the underlying problem in the bindings generation? Sometimes using the module prefix is preferable.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.


use std::sync::atomic::{AtomicUsize, Ordering};
use std::io::Error;
Expand Down Expand Up @@ -316,7 +316,7 @@ pub trait ChannelKeys : Send+Clone {
/// Note that if this fails or is rejected, the channel will not be publicly announced and
/// our counterparty may (though likely will not) close the channel on us for violating the
/// protocol.
fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;
fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;

/// Set the remote channel basepoints and remote/local to_self_delay.
/// This is done immediately on incoming channels and as soon as the channel is accepted on outgoing channels.
Expand Down Expand Up @@ -584,7 +584,7 @@ impl ChannelKeys for InMemoryChannelKeys {
Ok(secp_ctx.sign(&sighash, &self.funding_key))
}

fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
Ok(secp_ctx.sign(&msghash, &self.funding_key))
}
Expand Down
3 changes: 2 additions & 1 deletion lightning/src/ln/chan_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ use util::byte_utils;

use bitcoin::secp256k1::key::{SecretKey, PublicKey};
use bitcoin::secp256k1::{Secp256k1, Signature};
use bitcoin::secp256k1::Error as SecpError;
use bitcoin::secp256k1;

use std::{cmp, mem};
Expand Down Expand Up @@ -357,7 +358,7 @@ impl_writeable!(ChannelPublicKeys, 33*5, {

impl TxCreationKeys {
/// Create a new TxCreationKeys from channel base points and the per-commitment point
pub fn new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, secp256k1::Error> {
pub fn new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, SecpError> {
Ok(TxCreationKeys {
per_commitment_point: per_commitment_point.clone(),
revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?,
Expand Down
16 changes: 9 additions & 7 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,12 @@ use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpd
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
use ln::msgs::NetAddress;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
use util::config::UserConfig;
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
Expand Down Expand Up @@ -312,7 +314,7 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
pub(super) pending_msg_events: Vec<MessageSendEvent>,
}

/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
Expand Down Expand Up @@ -1483,7 +1485,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
Expand All @@ -1503,7 +1505,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
let _ = self.total_consistency_lock.read().unwrap();

if addresses.len() > 500 {
Expand Down Expand Up @@ -3010,14 +3012,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
}
}

impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
F::Target: FeeEstimator,
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self.process_pending_monitor_events();
Expand All @@ -3029,14 +3031,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
}
}

impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
F::Target: FeeEstimator,
L::Target: Logger,
{
fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
fn get_and_clear_pending_events(&self) -> Vec<Event> {
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self.process_pending_monitor_events();
Expand Down
14 changes: 8 additions & 6 deletions lightning/src/ln/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,11 +47,13 @@ use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
use util::logger::Logger;
use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
use util::{byte_utils, events};
use util::events::Event;

use std::collections::{HashMap, hash_map};
use std::sync::Mutex;
use std::{hash,cmp, mem};
use std::ops::Deref;
use std::io::Error;

/// An update generated by the underlying Channel itself which contains some new information the
/// ChannelMonitor should be made aware of.
Expand Down Expand Up @@ -317,7 +319,7 @@ impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: De
L::Target: Logger,
C::Target: ChainWatchInterface,
{
fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
fn get_and_clear_pending_events(&self) -> Vec<Event> {
let mut pending_events = Vec::new();
for chan in self.monitors.lock().unwrap().values_mut() {
pending_events.append(&mut chan.get_and_clear_pending_events());
Expand Down Expand Up @@ -795,7 +797,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,

pending_monitor_events: Vec<MonitorEvent>,
pending_events: Vec<events::Event>,
pending_events: Vec<Event>,

// Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
// we have to take actions once they reach enough confs. Key is a block height timer, i.e we enforce
Expand Down Expand Up @@ -946,7 +948,7 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
/// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
returned block hash and the current chain and then reconnecting blocks to get to the
/// best chain) upon deserializing the object!
pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
//TODO: We still write out all the serialization here manually instead of using the fancy
//serialization framework we have, we should migrate things over to it.
writer.write_all(&[SERIALIZATION_VERSION; 1])?;
Expand Down Expand Up @@ -1452,7 +1454,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
/// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
/// no internal locking in ChannelMonitors.
pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
let mut ret = Vec::new();
mem::swap(&mut ret, &mut self.pending_events);
ret
Expand Down Expand Up @@ -1957,7 +1959,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
},
OnchainEvent::MaturingOutput { descriptor } => {
log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(events::Event::SpendableOutputs {
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
}
Expand Down Expand Up @@ -2437,7 +2439,7 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor
}

let pending_events_len: u64 = Readable::read(reader)?;
let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<events::Event>()));
let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
for _ in 0..pending_events_len {
if let Some(event) = MaybeReadable::read(reader)? {
pending_events.push(event);
Expand Down
7 changes: 4 additions & 3 deletions lightning/src/routing/network_graph.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ use bitcoin::blockdata::opcodes;

use chain::chaininterface::{ChainError, ChainWatchInterface};
use ln::features::{ChannelFeatures, NodeFeatures};
use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField, MAX_VALUE_MSAT};
use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
use util::logger::Logger;
Expand Down Expand Up @@ -154,7 +155,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx))
}

fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
let network_graph = self.network_graph.read().unwrap();
let mut result = Vec::with_capacity(batch_amount as usize);
let mut iter = network_graph.get_channels().range(starting_point..);
Expand Down Expand Up @@ -182,7 +183,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
result
}

fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
let network_graph = self.network_graph.read().unwrap();
let mut result = Vec::with_capacity(batch_amount as usize);
let mut iter = if let Some(pubkey) = starting_point {
Expand Down