diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e795ecb9fba..350415af24c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,10 +88,11 @@ be covered by functional tests. When refactoring, structure your PR to make it easy to review and don't hesitate to split it into multiple small, focused PRs. -The Minimum Supported Rust Version (MSRV) currently is 1.41.1 (enforced by -our GitHub Actions). Also, the compatibility for LDK object serialization is -currently ensured back to and including crate version 0.0.99 (see the -[changelog](CHANGELOG.md)). +The Minimum Supported Rust Version (MSRV) currently is 1.48.0 (enforced by +our GitHub Actions). We support reading serialized LDK objects written by any +version of LDK 0.0.99 and above. We support LDK versions 0.0.113 and above +reading serialized LDK objects written by modern LDK. Any expected issues with +upgrades or downgrades should be mentioned in the [changelog](CHANGELOG.md). Commits should cover both the issue fixed and the solution's rationale. These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind. diff --git a/fuzz/src/peer_crypt.rs b/fuzz/src/peer_crypt.rs index f6df392fcef..3acf4d664f6 100644 --- a/fuzz/src/peer_crypt.rs +++ b/fuzz/src/peer_crypt.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -use lightning::ln::peer_channel_encryptor::PeerChannelEncryptor; +use lightning::ln::peer_channel_encryptor::{PeerChannelEncryptor, MessageBuf}; use lightning::util::test_utils::TestNodeSigner; use bitcoin::secp256k1::{Secp256k1, PublicKey, SecretKey}; @@ -74,15 +74,17 @@ pub fn do_test(data: &[u8]) { assert!(crypter.is_ready_for_encryption()); crypter }; + let mut buf = [0; 65536 + 16]; loop { if get_slice!(1)[0] == 0 { - crypter.encrypt_buffer(get_slice!(slice_to_be16(get_slice!(2)))); + crypter.encrypt_buffer(MessageBuf::from_encoded(&get_slice!(slice_to_be16(get_slice!(2))))); } else { let len = match crypter.decrypt_length_header(get_slice!(16+2)) { Ok(len) => len, Err(_) => return, }; - match crypter.decrypt_message(get_slice!(len as usize + 16)) { + buf[..len as usize + 16].copy_from_slice(&get_slice!(len as usize + 16)); + match crypter.decrypt_message(&mut buf[..len as usize + 16]) { Ok(_) => {}, Err(_) => return, } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index bac18b2b398..4483ae74256 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -422,7 +422,11 @@ const SOCK_WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(clone_socket_waker, wake_socket_waker, wake_socket_waker_by_ref, drop_socket_waker); fn clone_socket_waker(orig_ptr: *const ()) -> task::RawWaker { - write_avail_to_waker(orig_ptr as *const mpsc::Sender<()>) + let new_waker = unsafe { Arc::from_raw(orig_ptr as *const mpsc::Sender<()>) }; + let res = write_avail_to_waker(&new_waker); + // Don't decrement the refcount when dropping new_waker; turn it back into a raw pointer via `into_raw`. + let _ = Arc::into_raw(new_waker); + res } // When waking, an error should be fine. Most likely we got two send_datas in a row, both of which // failed to fully write, but we only need to call write_buffer_space_avail() once.
Otherwise, the @@ -435,16 +439,15 @@ fn wake_socket_waker(orig_ptr: *const ()) { } fn wake_socket_waker_by_ref(orig_ptr: *const ()) { let sender_ptr = orig_ptr as *const mpsc::Sender<()>; - let sender = unsafe { (*sender_ptr).clone() }; + let sender = unsafe { &*sender_ptr }; let _ = sender.try_send(()); } fn drop_socket_waker(orig_ptr: *const ()) { - let _orig_box = unsafe { Box::from_raw(orig_ptr as *mut mpsc::Sender<()>) }; - // _orig_box is now dropped + let _orig_arc = unsafe { Arc::from_raw(orig_ptr as *mut mpsc::Sender<()>) }; - // _orig_arc is now dropped } -fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker { - let new_box = Box::leak(Box::new(unsafe { (*sender).clone() })); - let new_ptr = new_box as *const mpsc::Sender<()>; +fn write_avail_to_waker(sender: &Arc<mpsc::Sender<()>>) -> task::RawWaker { + let new_ptr = Arc::into_raw(Arc::clone(&sender)); task::RawWaker::new(new_ptr as *const (), &SOCK_WAKER_VTABLE) } @@ -452,12 +455,20 @@ fn write_avail_to_waker(sender: *const mpsc::Sender<()>) -> task::RawWaker { /// type in the template of PeerHandler. pub struct SocketDescriptor { conn: Arc<Mutex<Connection>>, + // We store a copy of the mpsc::Sender to wake the read task in an Arc here. While we could + // simply clone the sender and store a copy in each waker, that would require allocating for + // each waker. Instead, we can simply `Arc::clone` it, creating a new reference, and store the + // pointer in the waker. + write_avail_sender: Arc<mpsc::Sender<()>>, id: u64, } impl SocketDescriptor { fn new(conn: Arc<Mutex<Connection>>) -> Self { - let id = conn.lock().unwrap().id; - Self { conn, id } + let (id, write_avail_sender) = { + let us = conn.lock().unwrap(); + (us.id, Arc::new(us.write_avail.clone())) + }; + Self { conn, id, write_avail_sender } } } impl peer_handler::SocketDescriptor for SocketDescriptor { @@ -480,7 +491,7 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { let _ = us.read_waker.try_send(()); } if data.is_empty() { return 0; } - let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&us.write_avail)) }; + let waker = unsafe { task::Waker::from_raw(write_avail_to_waker(&self.write_avail_sender)) }; let mut ctx = task::Context::from_waker(&waker); let mut written_len = 0; loop { @@ -522,6 +533,7 @@ impl Clone for SocketDescriptor { Self { conn: Arc::clone(&self.conn), id: self.id, + write_avail_sender: Arc::clone(&self.write_avail_sender), } } } diff --git a/lightning/src/blinded_path/utils.rs b/lightning/src/blinded_path/utils.rs index c62b4e6c261..33a2cde8c8f 100644 --- a/lightning/src/blinded_path/utils.rs +++ b/lightning/src/blinded_path/utils.rs @@ -20,7 +20,7 @@ use crate::ln::msgs::DecodeError; use crate::ln::onion_utils; use crate::onion_message::Destination; use crate::util::chacha20poly1305rfc::ChaChaPolyWriteAdapter; -use crate::util::ser::{Readable, VecWriter, Writeable}; +use crate::util::ser::{Readable, Writeable}; use crate::io; use crate::prelude::*; @@ -129,10 +129,8 @@ where /// Encrypt TLV payload to be used as a [`crate::blinded_path::BlindedHop::encrypted_payload`]. fn encrypt_payload<P: Writeable>(payload: P, encrypted_tlvs_rho: [u8; 32]) -> Vec<u8> { - let mut writer = VecWriter(Vec::new()); let write_adapter = ChaChaPolyWriteAdapter::new(encrypted_tlvs_rho, &payload); - write_adapter.write(&mut writer).expect("In-memory writes cannot fail"); - writer.0 + write_adapter.encode() } /// Blinded path encrypted payloads may be padded to ensure they are equal length.
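For context on the `encrypt_payload` change above: `Writeable::encode` wraps the same VecWriter pattern the removed lines spelled out by hand, so the call site keeps its behavior. A rough, self-contained sketch of that equivalence follows; the helper names are illustrative only and not part of this patch.

    use crate::util::ser::{VecWriter, Writeable};

    // What the removed lines did by hand.
    fn encode_by_hand<T: Writeable>(value: &T) -> Vec<u8> {
        let mut writer = VecWriter(Vec::new());
        value.write(&mut writer).expect("In-memory writes cannot fail");
        writer.0
    }

    // What the call site does after the patch. With the ser.rs change later in
    // this diff, `encode()` also pre-allocates via `serialized_length()`.
    fn encode_via_trait<T: Writeable>(value: &T) -> Vec<u8> {
        value.encode()
    }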
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 52db68c1323..7d5af277417 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -39,7 +39,7 @@ use crate::chain::transaction::{OutPoint, TransactionData}; use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; -use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; +use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::util::logger::Logger; use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; @@ -6892,7 +6892,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { } const SERIALIZATION_VERSION: u8 = 3; -const MIN_SERIALIZATION_VERSION: u8 = 2; +const MIN_SERIALIZATION_VERSION: u8 = 3; impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,; (0, FailRelay), @@ -6972,14 +6972,6 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider { self.context.latest_monitor_update_id.write(writer)?; - let mut key_data = VecWriter(Vec::new()); - // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers. - self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?; - assert!(key_data.0.len() < core::usize::MAX); - assert!(key_data.0.len() < core::u32::MAX as usize); - (key_data.0.len() as u32).write(writer)?; - writer.write_all(&key_data.0[..])?; - // Write out the old serialization for shutdown_pubkey for backwards compatibility, if // deserialized from that format. match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) { diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs index 071ef8a9dd6..8569fa60ffe 100644 --- a/lightning/src/ln/peer_channel_encryptor.rs +++ b/lightning/src/ln/peer_channel_encryptor.rs @@ -34,6 +34,10 @@ use core::ops::Deref; /// and [BOLT-1](https://github.com/lightning/bolts/blob/master/01-messaging.md#lightning-message-format): pub const LN_MAX_MSG_LEN: usize = ::core::u16::MAX as usize; // Must be equal to 65535 +/// The (rough) size of the buffer to pre-allocate when encoding a message. Messages should +/// reliably be smaller than this size by at least 32 bytes or so.
+pub const MSG_BUF_ALLOC_SIZE: usize = 2048; + // Sha256("Noise_XK_secp256k1_ChaChaPoly_SHA256") const NOISE_CK: [u8; 32] = [0x26, 0x40, 0xf5, 0x2e, 0xeb, 0xcd, 0x9e, 0x88, 0x29, 0x58, 0x95, 0x1c, 0x79, 0x42, 0x50, 0xee, 0xdb, 0x28, 0x00, 0x2c, 0x05, 0xd7, 0xdc, 0x2e, 0xa0, 0xf1, 0x95, 0x40, 0x60, 0x42, 0xca, 0xf1]; // Sha256(NOISE_CK || "lightning") @@ -165,6 +169,18 @@ impl PeerChannelEncryptor { res.extend_from_slice(&tag); } + fn decrypt_in_place_with_ad(inout: &mut [u8], n: u64, key: &[u8; 32], h: &[u8]) -> Result<(), LightningError> { + let mut nonce = [0; 12]; + nonce[4..].copy_from_slice(&n.to_le_bytes()[..]); + + let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h); + let (inout, tag) = inout.split_at_mut(inout.len() - 16); + if chacha.check_decrypt_in_place(inout, tag).is_err() { + return Err(LightningError{err: "Bad MAC".to_owned(), action: msgs::ErrorAction::DisconnectPeer{ msg: None }}); + } + Ok(()) + } + #[inline] fn decrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], cyphertext: &[u8]) -> Result<(), LightningError> { let mut nonce = [0; 12]; @@ -411,16 +427,20 @@ impl PeerChannelEncryptor { Ok(self.their_node_id.unwrap().clone()) } - /// Encrypts the given pre-serialized message, returning the encrypted version. - /// panics if msg.len() > 65535 or Noise handshake has not finished. - pub fn encrypt_buffer(&mut self, msg: &[u8]) -> Vec<u8> { - if msg.len() > LN_MAX_MSG_LEN { + /// Builds sendable bytes for a message. + /// + /// `msgbuf` must begin with 16 + 2 dummy/0 bytes, which will be filled with the encrypted + /// message length and its MAC. It should then be followed by the message bytes themselves + /// (including the two-byte message type). + /// + /// For efficiency, the [`Vec::capacity`] should be at least 16 bytes larger than the + /// [`Vec::len`], to avoid reallocating for the message MAC, which will be appended to the vec. + fn encrypt_message_with_header_0s(&mut self, msgbuf: &mut Vec<u8>) { + let msg_len = msgbuf.len() - 16 - 2; + if msg_len > LN_MAX_MSG_LEN { panic!("Attempted to encrypt message longer than 65535 bytes!"); } - let mut res = Vec::with_capacity(msg.len() + 16*2 + 2); - res.resize(msg.len() + 16*2 + 2, 0); - match self.noise_state { NoiseState::Finished { ref mut sk, ref mut sn, ref mut sck, rk: _, rn: _, rck: _ } => { if *sn >= 1000 { @@ -430,16 +450,21 @@ impl PeerChannelEncryptor { *sn = 0; } - Self::encrypt_with_ad(&mut res[0..16+2], *sn, sk, &[0; 0], &(msg.len() as u16).to_be_bytes()); + Self::encrypt_with_ad(&mut msgbuf[0..16+2], *sn, sk, &[0; 0], &(msg_len as u16).to_be_bytes()); *sn += 1; - Self::encrypt_with_ad(&mut res[16+2..], *sn, sk, &[0; 0], msg); + Self::encrypt_in_place_with_ad(msgbuf, 16+2, *sn, sk, &[0; 0]); *sn += 1; }, _ => panic!("Tried to encrypt a message prior to noise handshake completion"), } + } - res + /// Encrypts the given pre-serialized message, returning the encrypted version. + /// panics if msg.len() > 65535 or Noise handshake has not finished. + pub fn encrypt_buffer(&mut self, mut msg: MessageBuf) -> Vec<u8> { + self.encrypt_message_with_header_0s(&mut msg.0); + msg.0 } /// Encrypts the given message, returning the encrypted version. @@ -448,33 +473,11 @@ pub fn encrypt_message<M: wire::Type>(&mut self, message: &M) -> Vec<u8> { // Allocate a buffer with 2KB, fitting most common messages. Reserve the first 16+2 bytes // for the 2-byte message type prefix and its MAC.
- let mut res = VecWriter(Vec::with_capacity(2048)); + let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); res.0.resize(16 + 2, 0); wire::write(message, &mut res).expect("In-memory messages must never fail to serialize"); - let msg_len = res.0.len() - 16 - 2; - if msg_len > LN_MAX_MSG_LEN { - panic!("Attempted to encrypt message longer than 65535 bytes!"); - } - - match self.noise_state { - NoiseState::Finished { ref mut sk, ref mut sn, ref mut sck, rk: _, rn: _, rck: _ } => { - if *sn >= 1000 { - let (new_sck, new_sk) = hkdf_extract_expand_twice(sck, sk); - *sck = new_sck; - *sk = new_sk; - *sn = 0; - } - - Self::encrypt_with_ad(&mut res.0[0..16+2], *sn, sk, &[0; 0], &(msg_len as u16).to_be_bytes()); - *sn += 1; - - Self::encrypt_in_place_with_ad(&mut res.0, 16+2, *sn, sk, &[0; 0]); - *sn += 1; - }, - _ => panic!("Tried to encrypt a message prior to noise handshake completion"), - } - + self.encrypt_message_with_header_0s(&mut res.0); res.0 } @@ -501,21 +504,20 @@ impl PeerChannelEncryptor { } } - /// Decrypts the given message. + /// Decrypts the given message up to msg.len() - 16. Bytes after msg.len() - 16 will be left + /// undefined (as they contain the Poly1305 tag bytes). + /// /// panics if msg.len() > 65535 + 16 - pub fn decrypt_message(&mut self, msg: &[u8]) -> Result<Vec<u8>, LightningError> { + pub fn decrypt_message(&mut self, msg: &mut [u8]) -> Result<(), LightningError> { if msg.len() > LN_MAX_MSG_LEN + 16 { panic!("Attempted to decrypt message longer than 65535 + 16 bytes!"); } match self.noise_state { NoiseState::Finished { sk: _, sn: _, sck: _, ref rk, ref mut rn, rck: _ } => { - let mut res = Vec::with_capacity(msg.len() - 16); - res.resize(msg.len() - 16, 0); - Self::decrypt_with_ad(&mut res[..], *rn, rk, &[0; 0], msg)?; + Self::decrypt_in_place_with_ad(&mut msg[..], *rn, rk, &[0; 0])?; *rn += 1; - - Ok(res) + Ok(()) }, _ => panic!("Tried to decrypt a message prior to noise handshake completion"), } @@ -542,9 +544,30 @@ impl PeerChannelEncryptor { } } +/// A buffer which stores an encoded message (including the two message-type bytes) with some +/// padding to allow for future encryption/MACing. +pub struct MessageBuf(Vec<u8>); +impl MessageBuf { + /// Creates a new buffer from an encoded message (i.e. the two message-type bytes followed by + /// the message contents). + /// + /// Panics if the message is longer than 65535 bytes. + pub fn from_encoded(encoded_msg: &[u8]) -> Self { + if encoded_msg.len() > LN_MAX_MSG_LEN { + panic!("Attempted to encrypt message longer than 65535 bytes!"); + } + // In addition to the message (containing the two message type bytes), we also have to add + // the message length header (and its MAC) and the message MAC.
+ let mut res = Vec::with_capacity(encoded_msg.len() + 16*2 + 2); + res.resize(encoded_msg.len() + 16 + 2, 0); + res[16 + 2..].copy_from_slice(&encoded_msg); + Self(res) + } +} + #[cfg(test)] mod tests { - use super::LN_MAX_MSG_LEN; + use super::{MessageBuf, LN_MAX_MSG_LEN}; use bitcoin::secp256k1::{PublicKey, SecretKey}; use bitcoin::secp256k1::Secp256k1; @@ -760,12 +783,11 @@ mod tests { for i in 0..1005 { let msg = [0x68, 0x65, 0x6c, 0x6c, 0x6f]; - let res = outbound_peer.encrypt_buffer(&msg); + let mut res = outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg)); assert_eq!(res.len(), 5 + 2*16 + 2); let len_header = res[0..2+16].to_vec(); assert_eq!(inbound_peer.decrypt_length_header(&len_header[..]).unwrap() as usize, msg.len()); - assert_eq!(inbound_peer.decrypt_message(&res[2+16..]).unwrap()[..], msg[..]); if i == 0 { assert_eq!(res, hex::decode("cf2b30ddf0cf3f80e7c35a6e6730b59fe802473180f396d88a8fb0db8cbcf25d2f214cf9ea1d95").unwrap()); @@ -780,6 +802,9 @@ mod tests { } else if i == 1001 { assert_eq!(res, hex::decode("2ecd8c8a5629d0d02ab457a0fdd0f7b90a192cd46be5ecb6ca570bfc5e268338b1a16cf4ef2d36").unwrap()); } + + inbound_peer.decrypt_message(&mut res[2+16..]).unwrap(); + assert_eq!(res[2 + 16..res.len() - 16], msg[..]); } } @@ -794,7 +819,7 @@ mod tests { fn max_message_len_encryption() { let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors(); let msg = [4u8; LN_MAX_MSG_LEN + 1]; - outbound_peer.encrypt_buffer(&msg); + outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg)); } #[test] @@ -803,7 +828,7 @@ mod tests { let mut inbound_peer = get_inbound_peer_for_test_vectors(); // MSG should not exceed LN_MAX_MSG_LEN + 16 - let msg = [4u8; LN_MAX_MSG_LEN + 17]; - inbound_peer.decrypt_message(&msg).unwrap(); + let mut msg = [4u8; LN_MAX_MSG_LEN + 17]; + inbound_peer.decrypt_message(&mut msg).unwrap(); } } diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index ba3a733d225..006538651a8 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -27,7 +27,7 @@ use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, Onio #[cfg(not(c_bindings))] use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager}; use crate::util::ser::{VecWriter, Writeable, Writer}; -use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep}; +use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; use crate::ln::wire; use crate::ln::wire::{Encode, Type}; #[cfg(not(c_bindings))] @@ -40,7 +40,7 @@ use crate::util::string::PrintableString; use crate::prelude::*; use crate::io; -use alloc::collections::LinkedList; +use alloc::collections::VecDeque; use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock}; use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering}; use core::{cmp, hash, fmt, mem}; @@ -489,13 +489,13 @@ struct Peer { their_features: Option<InitFeatures>, their_socket_address: Option<SocketAddress>, - pending_outbound_buffer: LinkedList<Vec<u8>>, + pending_outbound_buffer: VecDeque<Vec<u8>>, pending_outbound_buffer_first_msg_offset: usize, /// Queue gossip broadcasts separately from `pending_outbound_buffer` so we can easily /// prioritize channel messages over them. /// /// Note that these messages are *not* encrypted/MAC'd, and are only serialized.
- gossip_broadcast_buffer: LinkedList<Vec<u8>>, + gossip_broadcast_buffer: VecDeque<MessageBuf>, awaiting_write_event: bool, pending_read_buffer: Vec<u8>, @@ -785,7 +785,7 @@ impl From<LightningError> for MessageHandlingError { macro_rules! encode_msg { ($msg: expr) => {{ - let mut buffer = VecWriter(Vec::new()); + let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); wire::write($msg, &mut buffer).unwrap(); buffer.0 }} @@ -997,9 +997,9 @@ impl>(); + let large_capacity = peer.pending_outbound_buffer.capacity() > 4096 / VEC_SIZE; + let lots_of_slack = peer.pending_outbound_buffer.len() + < peer.pending_outbound_buffer.capacity() / 2; + if large_capacity && lots_of_slack { + peer.pending_outbound_buffer.shrink_to_fit(); + } } else { peer.awaiting_write_event = true; } @@ -1244,8 +1251,9 @@ impl) { + fn enqueue_encoded_gossip_broadcast(&self, peer: &mut Peer, encoded_message: MessageBuf) { peer.msgs_sent_since_pong += 1; + debug_assert!(peer.gossip_broadcast_buffer.len() <= OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP); peer.gossip_broadcast_buffer.push_back(encoded_message); } @@ -1402,17 +1410,18 @@ impl= 2); + debug_assert!(peer.pending_read_buffer.len() >= 2 + 16); + try_potential_handleerror!(peer, + peer.channel_encryptor.decrypt_message(&mut peer.pending_read_buffer[..])); + + let mut reader = io::Cursor::new(&peer.pending_read_buffer[..peer.pending_read_buffer.len() - 16]); + let message_result = wire::read(&mut reader, &*self.message_handler.custom_message_handler); // Reset read buffer if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } peer.pending_read_buffer.resize(18, 0); peer.pending_read_is_header = true; - let mut reader = io::Cursor::new(&msg_data[..]); - let message_result = wire::read(&mut reader, &*self.message_handler.custom_message_handler); let message = match message_result { Ok(x) => x, Err(e) => { @@ -1791,7 +1800,7 @@ impl { @@ -1818,7 +1827,7 @@ impl { @@ -1840,7 +1849,7 @@ impl debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"), diff --git a/lightning/src/ln/script.rs b/lightning/src/ln/script.rs index 6c1d2102400..079416eae77 100644 --- a/lightning/src/ln/script.rs +++ b/lightning/src/ln/script.rs @@ -44,10 +44,6 @@ impl Writeable for ShutdownScript { fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> { self.0.write(w) } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } } impl Readable for ShutdownScript { diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index c51180fee73..ff8b084b78a 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -412,11 +412,17 @@ macro_rules! get_pubkey_from_node_id { } } +fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash { + let mut engine = Sha256dHash::engine(); + msg.write(&mut engine).expect("In-memory structs should not fail to serialize"); + Sha256dHash::from_engine(engine) +} + /// Verifies the signature of a [`NodeAnnouncement`]. /// /// Returns an error if it is invalid.
pub fn verify_node_announcement<C: Verification>(msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.signature, &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"), "node_announcement"); Ok(()) @@ -426,7 +432,7 @@ pub fn verify_node_announcement<C: Verification>(msg: &NodeAnnouncement, secp_ct /// /// Returns an error if one of the signatures is invalid. pub fn verify_channel_announcement<C: Verification>(msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>) -> Result<(), LightningError> { - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement"), "channel_announcement"); secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement"), "channel_announcement"); @@ -1312,14 +1318,16 @@ impl<L: Deref> ReadableArgs<L> for NetworkGraph<L> where L::Target: Logger { let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::new(); + // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. + let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 103500)); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info = Readable::read(reader)?; channels.insert(chan_id, chan_info); } let nodes_count: u64 = Readable::read(reader)?; - let mut nodes = IndexedMap::new(); + // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. + let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 22500)); for _ in 0..nodes_count { let node_id = Readable::read(reader)?; let node_info = Readable::read(reader)?; @@ -1967,7 +1975,7 @@ impl<L: Deref> NetworkGraph<L> where L::Target: Logger { } } } - let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if msg.flags & 1 == 1 { check_update_latest!(channel.two_to_one); if let Some(sig) = sig { diff --git a/lightning/src/sign/type_resolver.rs b/lightning/src/sign/type_resolver.rs index 73d2cceb3e8..f76650982c2 100644 --- a/lightning/src/sign/type_resolver.rs +++ b/lightning/src/sign/type_resolver.rs @@ -18,6 +18,7 @@ impl<ECS: ChannelSigner> ChannelSignerType<ECS>{ } } + #[allow(unused)] pub(crate) fn as_ecdsa(&self) -> Option<&ECS> { match self { ChannelSignerType::Ecdsa(ecs) => Some(ecs) diff --git a/lightning/src/util/chacha20poly1305rfc.rs b/lightning/src/util/chacha20poly1305rfc.rs index a5bec2c82b8..d5792e0ac2b 100644 --- a/lightning/src/util/chacha20poly1305rfc.rs +++ b/lightning/src/util/chacha20poly1305rfc.rs @@ -122,10 +122,15 @@ mod real_chachapoly { } } - // Decrypt in place, without checking the tag. Use `finish_and_check_tag` to check it - // later when decryption finishes. - // - // Should never be `pub` because the public API should always enforce tag checking.
+ pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> { + self.decrypt_in_place(input_output); + if self.finish_and_check_tag(tag) { Ok(()) } else { Err(()) } + } + + /// Decrypt in place, without checking the tag. Use `finish_and_check_tag` to check it + /// later when decryption finishes. + /// + /// Should never be `pub` because the public API should always enforce tag checking. pub(super) fn decrypt_in_place(&mut self, input_output: &mut [u8]) { debug_assert!(self.finished == false); self.mac.input(input_output); @@ -133,8 +138,8 @@ mod real_chachapoly { self.cipher.process_in_place(input_output); } - // If we were previously decrypting with `decrypt_in_place`, this method must be used to finish - // decrypting and check the tag. Returns whether or not the tag is valid. + /// If we were previously decrypting with `decrypt_in_place`, this method must be used + /// to check the tag. Returns whether or not the tag is valid. pub(super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool { debug_assert!(self.finished == false); self.finished = true; @@ -313,6 +318,11 @@ mod fuzzy_chachapoly { true } + pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> { + self.decrypt_in_place(input_output); + if self.finish_and_check_tag(tag) { Ok(()) } else { Err(()) } + } + pub(super) fn decrypt_in_place(&mut self, _input: &mut [u8]) { assert!(self.finished == false); } diff --git a/lightning/src/util/indexed_map.rs b/lightning/src/util/indexed_map.rs index bb17d3450ee..39565f048c0 100644 --- a/lightning/src/util/indexed_map.rs +++ b/lightning/src/util/indexed_map.rs @@ -39,6 +39,14 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> { } } + /// Constructs a new, empty map with the given capacity pre-allocated + pub fn with_capacity(capacity: usize) -> Self { + Self { + map: HashMap::with_capacity(capacity), + keys: Vec::with_capacity(capacity), + } + } + #[inline(always)] /// Fetches the element with the given `key`, if one exists. pub fn get(&self, key: &K) -> Option<&V> { diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 85b991c61d4..64d59da0d1c 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -199,8 +199,14 @@ pub trait Writeable { /// Writes `self` out to a `Vec<u8>`. fn encode(&self) -> Vec<u8> { - let mut msg = VecWriter(Vec::new()); + let len = self.serialized_length(); + let mut msg = VecWriter(Vec::with_capacity(len)); self.write(&mut msg).unwrap(); + // Note that objects with interior mutability may change size between when we called + // serialized_length and when we called write. That's okay, but shouldn't happen during + // testing as most of our tests are not threaded. + #[cfg(test)] + debug_assert_eq!(len, msg.0.len()); msg.0 } @@ -211,6 +217,7 @@ pub trait Writeable { 0u16.write(&mut msg).unwrap(); self.write(&mut msg).unwrap(); let len = msg.0.len(); + debug_assert_eq!(len - 2, self.serialized_length()); msg.0[..2].copy_from_slice(&(len as u16 - 2).to_be_bytes()); msg.0 } diff --git a/pending_changelog/113-channel-ser-compat.txt b/pending_changelog/113-channel-ser-compat.txt new file mode 100644 index 00000000000..9bba9fd1c55 --- /dev/null +++ b/pending_changelog/113-channel-ser-compat.txt @@ -0,0 +1,4 @@ + * `ChannelManager`s written with LDK 0.0.119 are no longer readable by versions + of LDK prior to 0.0.113.
Users wishing to downgrade to LDK 0.0.112 or before + can read an 0.0.119-serialized `ChannelManager` with a version of LDK from + 0.0.113 to 0.0.118, re-serialize it, and then downgrade.
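Taken together, the peer_channel_encryptor changes above preserve one buffer layout end to end: 16 + 2 dummy bytes up front for the encrypted length header and its MAC, the encoded message (two type bytes plus body) after that, and 16 spare bytes of capacity for the trailing message MAC. A rough standalone sketch of that layout, using only the constants quoted above and not code from this patch:

    // Final wire layout after encryption:
    // [ 2-byte length ciphertext | 16-byte length MAC | message ciphertext | 16-byte message MAC ]
    fn sketch_message_buf(encoded_msg: &[u8]) -> Vec<u8> {
        const LN_MAX_MSG_LEN: usize = u16::MAX as usize; // 65535, as in peer_channel_encryptor.rs
        assert!(encoded_msg.len() <= LN_MAX_MSG_LEN);
        let mut buf = Vec::with_capacity(encoded_msg.len() + 16 * 2 + 2);
        buf.resize(16 + 2, 0);              // dummy bytes the encryptor overwrites with the length and its MAC
        buf.extend_from_slice(encoded_msg); // the two message-type bytes plus the message body
        buf                                 // 16 bytes of capacity remain for the trailing MAC
    }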