diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 79506d179..0598819ff 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -21,7 +21,6 @@ jobs:
         rust:
           - nightly
           - 1.59
-          - 1.57
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -31,7 +30,6 @@ jobs:
           toolchain: ${{ matrix.rust }}
           override: true
       - name: Run cargo build for stable
-        if: matrix.rust != 1.57
         uses: actions-rs/cargo@v1
         with:
           command: build
@@ -42,7 +40,6 @@ jobs:
           command: build
           args: --no-default-features
       - name: Run cargo doc for stable
-        if: matrix.rust != 1.57
        uses: actions-rs/cargo@v1
        with:
          command: doc
@@ -53,7 +50,6 @@ jobs:
           command: doc
           args: --no-default-features
       - name: Run cargo test for stable
-        if: matrix.rust != 1.57
         uses: actions-rs/cargo@v1
         with:
           command: test
diff --git a/Cargo.toml b/Cargo.toml
index 196acdafb..b04202dd5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,9 +15,9 @@ license = "MIT/Apache-2.0"
 name = "x86_64"
 readme = "README.md"
 repository = "https://github.com/rust-osdev/x86_64"
-version = "0.14.11"
+version = "0.15.0-beta"
 edition = "2018"
-rust-version = "1.57" # Needed to support panic! in const fns
+rust-version = "1.59" # Needed to support inline asm and default const generics

 [dependencies]
 bit_field = "0.10.1"
@@ -28,17 +28,13 @@ rustversion = "1.0.5"
 [features]
 default = ["nightly", "instructions"]
 instructions = []
-nightly = ["const_fn", "step_trait", "abi_x86_interrupt"]
+nightly = [ "const_fn", "step_trait", "abi_x86_interrupt", "asm_const" ]
 abi_x86_interrupt = []
 const_fn = []
+asm_const = []
 step_trait = []
 doc_auto_cfg = []

-# These features are no longer used and only there for backwards compatibility.
-external_asm = []
-inline_asm = []
-doc_cfg = []
-
 [package.metadata.docs.rs]
 all-features = true
diff --git a/Changelog.md b/Changelog.md
index 947e1d09b..b52196e6d 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,10 +1,41 @@
 # Unreleased

+# 0.15.0-beta – 2024-02-08
+
+## Breaking changes
+
+- [replace software_interrupt! macro with generic function](https://github.com/rust-osdev/x86_64/pull/259)
+- [Use SegmentSelector in InterruptStackFrame](https://github.com/rust-osdev/x86_64/pull/263)
+- [add `InvalidStarSegmentSelectors` error](https://github.com/rust-osdev/x86_64/pull/317)
+- [add `PcidTooBig` error](https://github.com/rust-osdev/x86_64/pull/316)
+- [implement `Index<u8>` for IDT instead of `Index<usize>`](https://github.com/rust-osdev/x86_64/pull/319)
+- [change `cpu_flags`'s type to `RFlags`](https://github.com/rust-osdev/x86_64/pull/324)
+- [fix `load_tss` and `GlobalDescriptorTable`](https://github.com/rust-osdev/x86_64/pull/323)
+- [add an immutable getter for the level 4 page table](https://github.com/rust-osdev/x86_64/pull/327)
+- [make `Cr2::read` return a result](https://github.com/rust-osdev/x86_64/pull/335)
+- [remove `external_asm` and `inline_asm` features](https://github.com/rust-osdev/x86_64/pull/345)
+- [Allow the GDT to be of any length](https://github.com/rust-osdev/x86_64/pull/360)
+- [Remove software_interrupt!
macro](https://github.com/rust-osdev/x86_64/pull/363) +- [Remove usize trait impls](https://github.com/rust-osdev/x86_64/pull/364) +- [Remove deprecated functions/flags](https://github.com/rust-osdev/x86_64/pull/368) +- [VirtAddr improvements](https://github.com/rust-osdev/x86_64/pull/370) +- [Add structures::gdt::Entry type](https://github.com/rust-osdev/x86_64/pull/380) +- [Allow GDT to be loaded with shared reference](https://github.com/rust-osdev/x86_64/pull/381) +- [seal off the `PageSize` trait](https://github.com/rust-osdev/x86_64/pull/404) +- [idt: Fixup Options structure and cleanup set_handler_fn](https://github.com/rust-osdev/x86_64/pull/226) + ## New Features - [Add `HandlerFuncType` trait](https://github.com/rust-osdev/x86_64/pull/439) -# 0.14.11 – 2022-09-15 +## Fixes + +- [fix typo in docs](https://github.com/rust-osdev/x86_64/pull/265) +- [activate `feature(asm_const)`](https://github.com/rust-osdev/x86_64/pull/320) +- [gdt: Check that MAX is in range](https://github.com/rust-osdev/x86_64/pull/365) +- [fix `Page::from_page_table_indices`](https://github.com/rust-osdev/x86_64/pull/398) + +# 0.14.11 – 2023-09-15 ## New Features diff --git a/README.md b/README.md index 32c819886..d24f1a1a5 100644 --- a/README.md +++ b/README.md @@ -12,11 +12,13 @@ Support for x86_64 specific instructions (e.g. TLB flush), registers (e.g. contr ## Minimum Supported Rust Version (MSRV) -If no features are enabled (`--no-default-features`), Rust 1.57.0 is required. +If no nightly features are enabled, Rust 1.59.0 is required. +This can be done by either: + - `--no-default-features --features instructions` + - `--no-default-features` -If only the `instructions` feature is enabled (`--no-default-features --features instructions`), Rust 1.59.0 is required. - -If the `nightly` feature or any of its sub-features is enabled, a recent nightly is required. +If the `nightly` feature or any of its sub-features is enabled (which is the +default), a recent nightly is required. ## Other OS development crates @@ -37,4 +39,4 @@ useful crates in this space include: [`read_volatile`](https://doc.rust-lang.org/std/ptr/fn.read_volatile.html) and [`write_volatile`](https://doc.rust-lang.org/std/ptr/fn.write_volatile.html) - Makes it easier to program [MMIO](https://en.wikipedia.org/wiki/Memory-mapped_I/O) interfaces and devices. - - Works on any Rust target. \ No newline at end of file + - Works on any Rust target. diff --git a/src/addr.rs b/src/addr.rs index 8070369b9..83674b034 100644 --- a/src/addr.rs +++ b/src/addr.rs @@ -20,7 +20,7 @@ const ADDRESS_SPACE_SIZE: u64 = 0x1_0000_0000_0000; /// between `u64` and `usize`. /// /// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need -/// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterium +/// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterion /// are called “canonical”. This type guarantees that it always represents a canonical address. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] @@ -60,39 +60,43 @@ impl core::fmt::Debug for VirtAddrNotValid { impl VirtAddr { /// Creates a new canonical virtual address. /// - /// This function performs sign extension of bit 47 to make the address canonical. + /// The provided address should already be canonical. If you want to check + /// whether an address is canonical, use [`try_new`](Self::try_new). 
     ///
     /// ## Panics
     ///
-    /// This function panics if the bits in the range 48 to 64 contain data (i.e. are not null and no sign extension).
+    /// This function panics if the bits in the range 48 to 64 are invalid
+    /// (i.e. are not a proper sign extension of bit 47).
     #[inline]
-    pub fn new(addr: u64) -> VirtAddr {
-        Self::try_new(addr).expect(
-            "address passed to VirtAddr::new must not contain any data \
-            in bits 48 to 64",
-        )
+    pub const fn new(addr: u64) -> VirtAddr {
+        // TODO: Replace with .ok().expect(msg) when that works on stable.
+        match Self::try_new(addr) {
+            Ok(v) => v,
+            Err(_) => panic!("virtual address must be sign extended in bits 48 to 64"),
+        }
     }

     /// Tries to create a new canonical virtual address.
     ///
-    /// This function tries to performs sign
-    /// extension of bit 47 to make the address canonical. It succeeds if bits 48 to 64 are
-    /// either a correct sign extension (i.e. copies of bit 47) or all null. Else, an error
-    /// is returned.
-    #[inline]
-    pub fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
-        match addr.get_bits(47..64) {
-            0 | 0x1ffff => Ok(VirtAddr(addr)),      // address is canonical
-            1 => Ok(VirtAddr::new_truncate(addr)),  // address needs sign extension
-            _ => Err(VirtAddrNotValid(addr)),
+    /// This function checks whether the given address is canonical
+    /// and returns an error otherwise. An address is canonical
+    /// if bits 48 to 64 are a correct sign
+    /// extension (i.e. copies of bit 47).
+    #[inline]
+    pub const fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
+        let v = Self::new_truncate(addr);
+        if v.0 == addr {
+            Ok(v)
+        } else {
+            Err(VirtAddrNotValid(addr))
         }
     }

     /// Creates a new canonical virtual address, throwing out bits 48..64.
     ///
-    /// This function performs sign extension of bit 47 to make the address canonical, so
-    /// bits 48 to 64 are overwritten. If you want to check that these bits contain no data,
-    /// use `new` or `try_new`.
+    /// This function performs sign extension of bit 47 to make the address
+    /// canonical, overwriting bits 48 to 64. If you want to check whether an
+    /// address is canonical, use [`new`](Self::new) or [`try_new`](Self::try_new).
     #[inline]
     pub const fn new_truncate(addr: u64) -> VirtAddr {
         // By doing the right shift as a signed operation (on a i64), it will
@@ -123,11 +127,7 @@ impl VirtAddr {
     }

     /// Creates a virtual address from the given pointer
-    // cfg(target_pointer_width = "32") is only here for backwards
-    // compatibility: Earlier versions of this crate did not have any `cfg()`
-    // on this function. At least for 32- and 64-bit we know the `as u64` cast
-    // doesn't truncate.
- #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] + #[cfg(target_pointer_width = "64")] #[inline] pub fn from_ptr(ptr: *const T) -> Self { Self::new(ptr as *const () as u64) @@ -320,23 +320,6 @@ impl AddAssign for VirtAddr { } } -#[cfg(target_pointer_width = "64")] -impl Add for VirtAddr { - type Output = Self; - #[inline] - fn add(self, rhs: usize) -> Self::Output { - self + rhs as u64 - } -} - -#[cfg(target_pointer_width = "64")] -impl AddAssign for VirtAddr { - #[inline] - fn add_assign(&mut self, rhs: usize) { - self.add_assign(rhs as u64) - } -} - impl Sub for VirtAddr { type Output = Self; #[inline] @@ -352,23 +335,6 @@ impl SubAssign for VirtAddr { } } -#[cfg(target_pointer_width = "64")] -impl Sub for VirtAddr { - type Output = Self; - #[inline] - fn sub(self, rhs: usize) -> Self::Output { - self - rhs as u64 - } -} - -#[cfg(target_pointer_width = "64")] -impl SubAssign for VirtAddr { - #[inline] - fn sub_assign(&mut self, rhs: usize) { - self.sub_assign(rhs as u64) - } -} - impl Sub for VirtAddr { type Output = u64; #[inline] @@ -583,23 +549,6 @@ impl AddAssign for PhysAddr { } } -#[cfg(target_pointer_width = "64")] -impl Add for PhysAddr { - type Output = Self; - #[inline] - fn add(self, rhs: usize) -> Self::Output { - self + rhs as u64 - } -} - -#[cfg(target_pointer_width = "64")] -impl AddAssign for PhysAddr { - #[inline] - fn add_assign(&mut self, rhs: usize) { - self.add_assign(rhs as u64) - } -} - impl Sub for PhysAddr { type Output = Self; #[inline] @@ -615,23 +564,6 @@ impl SubAssign for PhysAddr { } } -#[cfg(target_pointer_width = "64")] -impl Sub for PhysAddr { - type Output = Self; - #[inline] - fn sub(self, rhs: usize) -> Self::Output { - self - rhs as u64 - } -} - -#[cfg(target_pointer_width = "64")] -impl SubAssign for PhysAddr { - #[inline] - fn sub_assign(&mut self, rhs: usize) { - self.sub_assign(rhs as u64) - } -} - impl Sub for PhysAddr { type Output = u64; #[inline] diff --git a/src/instructions/interrupts.rs b/src/instructions/interrupts.rs index e74d33898..80102cf45 100644 --- a/src/instructions/interrupts.rs +++ b/src/instructions/interrupts.rs @@ -139,12 +139,19 @@ pub fn int3() { /// Generate a software interrupt by invoking the `int` instruction. /// -/// This currently needs to be a macro because the `int` argument needs to be an -/// immediate. This macro will be replaced by a generic function when support for -/// const generics is implemented in Rust. -#[macro_export] -macro_rules! software_interrupt { - ($x:expr) => {{ - asm!("int {id}", id = const $x, options(nomem, nostack)); - }}; +/// ## Safety +/// +/// Invoking an arbitrary interrupt is unsafe. It can cause your system to +/// crash if you invoke a double-fault (#8) or machine-check (#18) exception. +/// It can also cause memory/register corruption depending on the interrupt +/// implementation (if it expects values/pointers to be passed in registers). 
+#[cfg(feature = "asm_const")]
+#[cfg_attr(
+    feature = "doc_cfg",
+    doc(cfg(any(feature = "nightly", feature = "asm_const")))
+)]
+pub unsafe fn software_interrupt<const NUM: u8>() {
+    unsafe {
+        asm!("int {num}", num = const NUM, options(nomem, nostack));
+    }
 }
diff --git a/src/instructions/port.rs b/src/instructions/port.rs
index 0263d0e0e..9353634e9 100644
--- a/src/instructions/port.rs
+++ b/src/instructions/port.rs
@@ -4,6 +4,7 @@ use core::arch::asm;
 use core::fmt;
 use core::marker::PhantomData;

+use crate::sealed::Sealed;
 pub use crate::structures::port::{PortRead, PortWrite};

 impl PortRead for u8 {
@@ -66,43 +67,43 @@ impl PortWrite for u32 {
     }
 }

-mod sealed {
-    pub trait Access {
-        const DEBUG_NAME: &'static str;
-    }
-}
+/// A marker trait for access types which allow accessing port values.
+pub trait PortAccess: Sealed {}

 /// A marker trait for access types which allow reading port values.
-pub trait PortReadAccess: sealed::Access {}
+pub trait PortReadAccess: PortAccess {}

 /// A marker trait for access types which allow writing port values.
-pub trait PortWriteAccess: sealed::Access {}
+pub trait PortWriteAccess: PortAccess {}

 /// An access marker type indicating that a port is only allowed to read values.
 #[derive(Debug)]
 pub struct ReadOnlyAccess(());

-impl sealed::Access for ReadOnlyAccess {
-    const DEBUG_NAME: &'static str = "ReadOnly";
+impl Sealed for ReadOnlyAccess {
+    const DEBUG_STR: &'static str = "ReadOnly";
 }
+impl PortAccess for ReadOnlyAccess {}
 impl PortReadAccess for ReadOnlyAccess {}

 /// An access marker type indicating that a port is only allowed to write values.
 #[derive(Debug)]
 pub struct WriteOnlyAccess(());

-impl sealed::Access for WriteOnlyAccess {
-    const DEBUG_NAME: &'static str = "WriteOnly";
+impl Sealed for WriteOnlyAccess {
+    const DEBUG_STR: &'static str = "WriteOnly";
 }
+impl PortAccess for WriteOnlyAccess {}
 impl PortWriteAccess for WriteOnlyAccess {}

 /// An access marker type indicating that a port is allowed to read or write values.
#[derive(Debug)] pub struct ReadWriteAccess(()); -impl sealed::Access for ReadWriteAccess { - const DEBUG_NAME: &'static str = "ReadWrite"; +impl Sealed for ReadWriteAccess { + const DEBUG_STR: &'static str = "ReadWrite"; } +impl PortAccess for ReadWriteAccess {} impl PortReadAccess for ReadWriteAccess {} impl PortWriteAccess for ReadWriteAccess {} @@ -165,12 +166,12 @@ impl PortGeneric { } } -impl fmt::Debug for PortGeneric { +impl fmt::Debug for PortGeneric { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PortGeneric") .field("port", &self.port) .field("size", &core::mem::size_of::()) - .field("access", &format_args!("{}", A::DEBUG_NAME)) + .field("access", &format_args!("{}", A::DEBUG_STR)) .finish() } } diff --git a/src/instructions/segmentation.rs b/src/instructions/segmentation.rs index cf8b433d6..f170b083a 100644 --- a/src/instructions/segmentation.rs +++ b/src/instructions/segmentation.rs @@ -109,92 +109,3 @@ impl GS { } } } - -/// Alias for [`CS::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `CS::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn set_cs(sel: SegmentSelector) { - unsafe { CS::set_reg(sel) } -} -/// Alias for [`SS::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `SS::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn load_ss(sel: SegmentSelector) { - unsafe { SS::set_reg(sel) } -} -/// Alias for [`DS::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `DS::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn load_ds(sel: SegmentSelector) { - unsafe { DS::set_reg(sel) } -} -/// Alias for [`ES::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `ES::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn load_es(sel: SegmentSelector) { - unsafe { ES::set_reg(sel) } -} -/// Alias for [`FS::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `FS::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn load_fs(sel: SegmentSelector) { - unsafe { FS::set_reg(sel) } -} -/// Alias for [`GS::set_reg()`] -#[deprecated(since = "0.14.4", note = "use `GS::set_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn load_gs(sel: SegmentSelector) { - unsafe { GS::set_reg(sel) } -} -/// Alias for [`GS::swap()`] -#[deprecated(since = "0.14.4", note = "use `GS::swap()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn swap_gs() { - unsafe { GS::swap() } -} -/// Alias for [`CS::get_reg()`] -#[deprecated(since = "0.14.4", note = "use `CS::get_reg()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub fn cs() -> SegmentSelector { - CS::get_reg() -} -/// Alias for [`FS::write_base()`]. -/// -/// Panics if the provided address is non-canonical. -#[deprecated(since = "0.14.4", note = "use `FS::write_base()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn wrfsbase(val: u64) { - unsafe { FS::write_base(VirtAddr::new(val)) } -} -/// Alias for [`FS::read_base()`] -#[deprecated(since = "0.14.4", note = "use `FS::read_base()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn rdfsbase() -> u64 { - FS::read_base().as_u64() -} -/// Alias for [`GS::write_base()`]. -/// -/// Panics if the provided address is non-canonical. 
-#[deprecated(since = "0.14.4", note = "use `GS::write_base()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn wrgsbase(val: u64) { - unsafe { GS::write_base(VirtAddr::new(val)) } -} -/// Alias for [`GS::read_base()`] -#[deprecated(since = "0.14.4", note = "use `GS::read_base()` instead")] -#[allow(clippy::missing_safety_doc)] -#[inline] -pub unsafe fn rdgsbase() -> u64 { - GS::read_base().as_u64() -} diff --git a/src/instructions/tlb.rs b/src/instructions/tlb.rs index 7f5fd48a1..ea60fce77 100644 --- a/src/instructions/tlb.rs +++ b/src/instructions/tlb.rs @@ -61,9 +61,9 @@ pub struct Pcid(u16); impl Pcid { /// Create a new PCID. Will result in a failure if the value of /// PCID is out of expected bounds. - pub const fn new(pcid: u16) -> Result { + pub const fn new(pcid: u16) -> Result { if pcid >= 4096 { - Err("PCID should be < 4096.") + Err(PcidTooBig(pcid)) } else { Ok(Pcid(pcid)) } @@ -75,6 +75,18 @@ impl Pcid { } } +/// A passed `u16` was not a valid PCID. +/// +/// A PCID has to be <= 4096 for x86_64. +#[derive(Debug)] +pub struct PcidTooBig(u16); + +impl fmt::Display for PcidTooBig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PCID should be < 4096, got {}", self.0) + } +} + /// Invalidate the given address in the TLB using the `invpcid` instruction. /// /// ## Safety diff --git a/src/lib.rs b/src/lib.rs index 820452803..c491ddbfe 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,8 @@ //! and access to various system registers. #![cfg_attr(not(test), no_std)] -#![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT add_entry() +#![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT::append() +#![cfg_attr(feature = "asm_const", feature(asm_const))] #![cfg_attr(feature = "abi_x86_interrupt", feature(abi_x86_interrupt))] #![cfg_attr(feature = "step_trait", feature(step_trait))] #![cfg_attr(feature = "doc_auto_cfg", feature(doc_auto_cfg))] @@ -62,3 +63,10 @@ impl PrivilegeLevel { } } } + +pub(crate) mod sealed { + pub trait Sealed { + /// A string representation for debug output. + const DEBUG_STR: &'static str; + } +} diff --git a/src/registers/control.rs b/src/registers/control.rs index 6b2dfebcf..4056bfea8 100644 --- a/src/registers/control.rs +++ b/src/registers/control.rs @@ -51,14 +51,6 @@ bitflags! { } } -impl Cr0Flags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - /// Contains the Page Fault Linear Address (PFLA). /// /// When a page fault occurs, the CPU sets this register to the faulting virtual address. @@ -82,14 +74,6 @@ bitflags! { } } -impl Cr3Flags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - /// Contains various control flags that enable architectural extensions, and /// indicate support for specific processor capabilities. #[derive(Debug)] @@ -162,9 +146,6 @@ bitflags! { /// Also enables access to the PKRU register (via the `RDPKRU`/`WRPKRU` /// instructions) to set user-mode protection key access controls. 
const PROTECTION_KEY_USER = 1 << 22; - /// Alias for [`PROTECTION_KEY_USER`](Cr4Flags::PROTECTION_KEY_USER) - #[deprecated(since = "0.14.5", note = "use `PROTECTION_KEY_USER` instead")] - const PROTECTION_KEY = 1 << 22; /// Enables Control-flow Enforcement Technology (CET) /// /// This enables the shadow stack feature, ensuring return addresses read @@ -178,18 +159,13 @@ bitflags! { } } -impl Cr4Flags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - #[cfg(feature = "instructions")] mod x86_64 { use super::*; - use crate::{instructions::tlb::Pcid, structures::paging::PhysFrame, PhysAddr, VirtAddr}; + use crate::{ + addr::VirtAddrNotValid, instructions::tlb::Pcid, structures::paging::PhysFrame, PhysAddr, + VirtAddr, + }; use core::arch::asm; impl Cr0 { @@ -268,9 +244,14 @@ mod x86_64 { impl Cr2 { /// Read the current page fault linear address from the CR2 register. + /// + /// # Errors + /// + /// This method returns a [`VirtAddrNotValid`] error if the CR2 register contains a + /// non-canonical address. Call [`Cr2::read_raw`] to handle such cases. #[inline] - pub fn read() -> VirtAddr { - VirtAddr::new(Self::read_raw()) + pub fn read() -> Result { + VirtAddr::try_new(Self::read_raw()) } /// Read the current page fault linear address from the CR2 register as a raw `u64`. diff --git a/src/registers/debug.rs b/src/registers/debug.rs index f70119746..068b2ee52 100644 --- a/src/registers/debug.rs +++ b/src/registers/debug.rs @@ -160,12 +160,6 @@ impl Dr6Flags { DebugAddressRegisterNumber::Dr3 => Self::TRAP3, } } - - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } } bitflags! { @@ -239,12 +233,6 @@ impl Dr7Flags { DebugAddressRegisterNumber::Dr3 => Self::GLOBAL_BREAKPOINT_3_ENABLE, } } - - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } } /// The condition for a hardware breakpoint. diff --git a/src/registers/mod.rs b/src/registers/mod.rs index 73632c06b..60c6ca545 100644 --- a/src/registers/mod.rs +++ b/src/registers/mod.rs @@ -8,9 +8,5 @@ pub mod rflags; pub mod segmentation; pub mod xcontrol; -#[cfg(feature = "instructions")] -#[allow(deprecated)] -pub use crate::instructions::segmentation::{rdfsbase, rdgsbase, wrfsbase, wrgsbase}; - #[cfg(feature = "instructions")] pub use crate::instructions::read_rip; diff --git a/src/registers/model_specific.rs b/src/registers/model_specific.rs index 529ca9d6d..18d98bcda 100644 --- a/src/registers/model_specific.rs +++ b/src/registers/model_specific.rs @@ -132,14 +132,6 @@ bitflags! { } } -impl EferFlags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - bitflags! 
{ /// Flags stored in IA32_U_CET and IA32_S_CET (Table-2-2 in Intel SDM Volume /// 4). The Intel SDM-equivalent names are described in parentheses. @@ -165,14 +157,6 @@ bitflags! { } } -impl CetFlags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - #[cfg(feature = "instructions")] mod x86_64 { use super::*; @@ -184,6 +168,7 @@ mod x86_64 { use crate::PrivilegeLevel; use bit_field::BitField; use core::convert::TryInto; + use core::fmt; // imports for intra doc links #[cfg(doc)] use crate::registers::{ @@ -442,35 +427,27 @@ mod x86_64 { ss_sysret: SegmentSelector, cs_syscall: SegmentSelector, ss_syscall: SegmentSelector, - ) -> Result<(), &'static str> { - let cs_sysret_cmp = cs_sysret - .0 - .checked_sub(16) - .ok_or("Sysret CS is not at least 16.")?; - let ss_sysret_cmp = ss_sysret - .0 - .checked_sub(8) - .ok_or("Sysret SS is not at least 8.")?; - let cs_syscall_cmp = cs_syscall.0; - let ss_syscall_cmp = ss_syscall - .0 - .checked_sub(8) - .ok_or("Syscall SS is not at least 8.")?; + ) -> Result<(), InvalidStarSegmentSelectors> { + // Convert to i32 to prevent underflows. + let cs_sysret_cmp = i32::from(cs_sysret.0) - 16; + let ss_sysret_cmp = i32::from(ss_sysret.0) - 8; + let cs_syscall_cmp = i32::from(cs_syscall.0); + let ss_syscall_cmp = i32::from(ss_syscall.0) - 8; if cs_sysret_cmp != ss_sysret_cmp { - return Err("Sysret CS and SS is not offset by 8."); + return Err(InvalidStarSegmentSelectors::SysretOffset); } if cs_syscall_cmp != ss_syscall_cmp { - return Err("Syscall CS and SS is not offset by 8."); + return Err(InvalidStarSegmentSelectors::SyscallOffset); } if ss_sysret.rpl() != PrivilegeLevel::Ring3 { - return Err("Sysret's segment must be a Ring3 segment."); + return Err(InvalidStarSegmentSelectors::SysretPrivilegeLevel); } if ss_syscall.rpl() != PrivilegeLevel::Ring0 { - return Err("Syscall's segment must be a Ring0 segment."); + return Err(InvalidStarSegmentSelectors::SyscallPrivilegeLevel); } unsafe { Self::write_raw(ss_sysret.0 - 8, cs_syscall.0) }; @@ -479,6 +456,29 @@ mod x86_64 { } } + #[derive(Debug)] + pub enum InvalidStarSegmentSelectors { + SysretOffset, + SyscallOffset, + SysretPrivilegeLevel, + SyscallPrivilegeLevel, + } + + impl fmt::Display for InvalidStarSegmentSelectors { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::SysretOffset => write!(f, "Sysret CS and SS are not offset by 8."), + Self::SyscallOffset => write!(f, "Syscall CS and SS are not offset by 8."), + Self::SysretPrivilegeLevel => { + write!(f, "Sysret's segment must be a Ring3 segment.") + } + Self::SyscallPrivilegeLevel => { + write!(f, "Syscall's segment must be a Ring0 segment.") + } + } + } + } + impl LStar { /// Read the current LStar register. /// This holds the target RIP of a syscall. diff --git a/src/registers/mxcsr.rs b/src/registers/mxcsr.rs index 55e05cc56..19db2741d 100644 --- a/src/registers/mxcsr.rs +++ b/src/registers/mxcsr.rs @@ -60,14 +60,6 @@ impl Default for MxCsr { } } -impl MxCsr { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). 
- pub const unsafe fn from_bits_unchecked(bits: u32) -> Self { - Self::from_bits_retain(bits) - } -} - #[cfg(feature = "instructions")] mod x86_64 { use super::*; diff --git a/src/registers/rflags.rs b/src/registers/rflags.rs index 7a20d930d..921bb8f2c 100644 --- a/src/registers/rflags.rs +++ b/src/registers/rflags.rs @@ -6,7 +6,7 @@ pub use self::x86_64::*; use bitflags::bitflags; bitflags! { - /// The RFLAGS register. + /// The RFLAGS register. All bit patterns are valid representations for this type. #[repr(transparent)] #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] pub struct RFlags: u64 { @@ -64,14 +64,6 @@ bitflags! { } } -impl RFlags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - #[cfg(feature = "instructions")] mod x86_64 { use super::*; diff --git a/src/registers/xcontrol.rs b/src/registers/xcontrol.rs index 2ab9924f6..09087a430 100644 --- a/src/registers/xcontrol.rs +++ b/src/registers/xcontrol.rs @@ -26,9 +26,6 @@ bitflags! { /// Enables AVX instructions and using the upper halves of the AVX registers /// with `XSAVE`/`XRSTOR`. const AVX = 1 << 2; - /// Alias for [`AVX`](XCr0Flags::AVX) - #[deprecated(since = "0.14.5", note = "use `AVX` instead")] - const YMM = 1<<2; /// Enables MPX instructions and using the BND0-BND3 bound registers /// with `XSAVE`/`XRSTOR` (Intel Only). const BNDREG = 1 << 3; @@ -53,14 +50,6 @@ bitflags! { } } -impl XCr0Flags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - #[cfg(feature = "instructions")] mod x86_64 { use super::*; diff --git a/src/structures/gdt.rs b/src/structures/gdt.rs index 3f0c2644c..701c7a1d4 100644 --- a/src/structures/gdt.rs +++ b/src/structures/gdt.rs @@ -5,19 +5,79 @@ use crate::structures::tss::TaskStateSegment; use crate::PrivilegeLevel; use bit_field::BitField; use bitflags::bitflags; +use core::fmt; // imports for intra-doc links #[cfg(doc)] use crate::registers::segmentation::{Segment, CS, SS}; +#[cfg(feature = "instructions")] +use core::sync::atomic::{AtomicU64 as EntryValue, Ordering}; +#[cfg(not(feature = "instructions"))] +use u64 as EntryValue; + +/// 8-byte entry in a descriptor table. +/// +/// A [`GlobalDescriptorTable`] (or LDT) is an array of these entries, and +/// [`SegmentSelector`]s index into this array. Each [`Descriptor`] in the table +/// uses either 1 Entry (if it is a [`UserSegment`](Descriptor::UserSegment)) or +/// 2 Entries (if it is a [`SystemSegment`](Descriptor::SystemSegment)). This +/// type exists to give users access to the raw entry bits in a GDT. +#[repr(transparent)] +pub struct Entry(EntryValue); + +impl Entry { + // Create a new Entry from a raw value. + const fn new(raw: u64) -> Self { + #[cfg(feature = "instructions")] + let raw = EntryValue::new(raw); + Self(raw) + } + + /// The raw bits for this entry. Depending on the [`Descriptor`] type, these + /// bits may correspond to those in [`DescriptorFlags`]. + pub fn raw(&self) -> u64 { + // TODO: Make this const fn when AtomicU64::load is const. 
+        #[cfg(feature = "instructions")]
+        let raw = self.0.load(Ordering::SeqCst);
+        #[cfg(not(feature = "instructions"))]
+        let raw = self.0;
+        raw
+    }
+}
+
+impl Clone for Entry {
+    fn clone(&self) -> Self {
+        Self::new(self.raw())
+    }
+}
+
+impl PartialEq for Entry {
+    fn eq(&self, other: &Self) -> bool {
+        self.raw() == other.raw()
+    }
+}
+
+impl Eq for Entry {}
+
+impl fmt::Debug for Entry {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Display inner value as hex
+        write!(f, "Entry({:#018x})", self.raw())
+    }
+}
+
 /// A 64-bit mode global descriptor table (GDT).
 ///
 /// In 64-bit mode, segmentation is not supported. The GDT is used nonetheless, for example for
 /// switching between user and kernel mode or for loading a TSS.
 ///
-/// The GDT has a fixed size of 8 entries, trying to add more entries will panic.
+/// The GDT has a fixed maximum size given by the `MAX` const generic parameter.
+/// Overflowing this limit by adding too many [`Descriptor`]s via
+/// [`GlobalDescriptorTable::append`] will panic.
 ///
 /// You do **not** need to add a null segment descriptor yourself - this is already done
-/// internally.
+/// internally. This means you can add up to `MAX - 1` additional [`Entry`]s to
+/// this table. Note that some [`Descriptor`]s may take up 2 [`Entry`]s.
 ///
 /// Data segment registers in ring 0 can be loaded with the null segment selector. When running in
 /// ring 3, the `ss` register must point to a valid data segment which can be obtained through the
@@ -37,68 +97,94 @@ use crate::registers::segmentation::{Segment, CS, SS};
 /// use x86_64::structures::gdt::{GlobalDescriptorTable, Descriptor};
 ///
 /// let mut gdt = GlobalDescriptorTable::new();
-/// gdt.add_entry(Descriptor::kernel_code_segment());
-/// gdt.add_entry(Descriptor::user_code_segment());
-/// gdt.add_entry(Descriptor::user_data_segment());
+/// gdt.append(Descriptor::kernel_code_segment());
+/// gdt.append(Descriptor::user_code_segment());
+/// gdt.append(Descriptor::user_data_segment());
 ///
 /// // Add entry for TSS, call gdt.load() then update segment registers
 /// ```
 #[derive(Debug, Clone)]
-pub struct GlobalDescriptorTable {
-    table: [u64; 8],
+pub struct GlobalDescriptorTable<const MAX: usize = 8> {
+    table: [Entry; MAX],
     len: usize,
 }

 impl GlobalDescriptorTable {
-    /// Creates an empty GDT.
+    /// Creates an empty GDT with the default length of 8.
+    pub const fn new() -> Self {
+        Self::empty()
+    }
+}
+
+impl<const MAX: usize> GlobalDescriptorTable<MAX> {
+    /// Creates an empty GDT which can hold `MAX` number of [`Entry`]s.
     #[inline]
-    pub const fn new() -> GlobalDescriptorTable {
-        GlobalDescriptorTable {
-            table: [0; 8],
+    pub const fn empty() -> Self {
+        // TODO: Replace with compiler error when feature(generic_const_exprs) is stable.
+        assert!(MAX > 0, "A GDT cannot have 0 entries");
+        assert!(MAX <= (1 << 13), "A GDT can only have at most 2^13 entries");
+
+        // TODO: Replace with inline_const when it's stable.
+        #[allow(clippy::declare_interior_mutable_const)]
+        const NULL: Entry = Entry::new(0);
+        Self {
+            table: [NULL; MAX],
             len: 1,
         }
     }

     /// Forms a GDT from a slice of `u64`.
     ///
-    /// # Safety
+    /// This method allows for creation of a GDT with malformed or invalid
+    /// entries. However, it is safe because loading a GDT with invalid
+    /// entries doesn't do anything until those entries are used. For example,
+    /// [`CS::set_reg`] and [`load_tss`](crate::instructions::tables::load_tss)
+    /// are both unsafe for this reason.
/// - /// * The user must make sure that the entries are well formed - /// * The provided slice **must not be larger than 8 items** (only up to the first 8 will be observed.) + /// Panics if: + /// * the provided slice has more than `MAX` entries + /// * the provided slice is empty + /// * the first entry is not zero + #[cfg_attr(not(feature = "instructions"), allow(rustdoc::broken_intra_doc_links))] #[inline] - pub const unsafe fn from_raw_slice(slice: &[u64]) -> GlobalDescriptorTable { + pub const fn from_raw_entries(slice: &[u64]) -> Self { let len = slice.len(); - let mut table = [0; 8]; + let mut table = Self::empty().table; let mut idx = 0; + assert!(len > 0, "cannot initialize GDT with empty slice"); + assert!(slice[0] == 0, "first GDT entry must be zero"); assert!( - len <= 8, - "initializing a GDT from a slice requires it to be **at most** 8 elements." + len <= MAX, + "cannot initialize GDT with slice exceeding the maximum length" ); while idx < len { - table[idx] = slice[idx]; + table[idx] = Entry::new(slice[idx]); idx += 1; } - GlobalDescriptorTable { table, len } + Self { table, len } } - /// Get a reference to the internal table. + /// Get a reference to the internal [`Entry`] table. /// - /// The resulting slice may contain system descriptors, which span two `u64`s. + /// The resulting slice may contain system descriptors, which span two [`Entry`]s. #[inline] - pub fn as_raw_slice(&self) -> &[u64] { + pub fn entries(&self) -> &[Entry] { &self.table[..self.len] } - /// Adds the given segment descriptor to the GDT, returning the segment selector. + /// Appends the given segment descriptor to the GDT, returning the segment selector. /// - /// Panics if the GDT doesn't have enough free entries to hold the Descriptor. + /// Note that depending on the type of the [`Descriptor`] this may append + /// either one or two new [`Entry`]s to the table. + /// + /// Panics if the GDT doesn't have enough free entries. #[inline] #[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))] - pub fn add_entry(&mut self, entry: Descriptor) -> SegmentSelector { + pub fn append(&mut self, entry: Descriptor) -> SegmentSelector { let index = match entry { Descriptor::UserSegment(value) => { if self.len > self.table.len().saturating_sub(1) { @@ -153,7 +239,7 @@ impl GlobalDescriptorTable { #[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))] fn push(&mut self, value: u64) -> usize { let index = self.len; - self.table[index] = value; + self.table[index] = Entry::new(value); self.len += 1; index } @@ -165,6 +251,8 @@ impl GlobalDescriptorTable { use core::mem::size_of; super::DescriptorTablePointer { base: crate::VirtAddr::new(self.table.as_ptr() as u64), + // 0 < self.next_free <= MAX <= 2^13, so the limit calculation + // will not underflow or overflow. limit: (self.len * size_of::() - 1) as u16, } } @@ -269,12 +357,6 @@ impl DescriptorFlags { /// A 64-bit user code segment pub const USER_CODE64: Self = Self::from_bits_truncate(Self::KERNEL_CODE64.bits() | Self::DPL_RING_3.bits()); - - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). 
- pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } } impl Descriptor { @@ -384,11 +466,11 @@ mod tests { // Makes a GDT that has two free slots fn make_six_entry_gdt() -> GlobalDescriptorTable { let mut gdt = GlobalDescriptorTable::new(); - gdt.add_entry(Descriptor::kernel_code_segment()); - gdt.add_entry(Descriptor::kernel_data_segment()); - gdt.add_entry(Descriptor::UserSegment(DescriptorFlags::USER_CODE32.bits())); - gdt.add_entry(Descriptor::user_data_segment()); - gdt.add_entry(Descriptor::user_code_segment()); + gdt.append(Descriptor::kernel_code_segment()); + gdt.append(Descriptor::kernel_data_segment()); + gdt.append(Descriptor::UserSegment(DescriptorFlags::USER_CODE32.bits())); + gdt.append(Descriptor::user_data_segment()); + gdt.append(Descriptor::user_code_segment()); assert_eq!(gdt.len, 6); gdt } @@ -397,7 +479,7 @@ mod tests { fn make_full_gdt() -> GlobalDescriptorTable { let mut gdt = make_six_entry_gdt(); - gdt.add_entry(Descriptor::tss_segment(&TSS)); + gdt.append(Descriptor::tss_segment(&TSS)); assert_eq!(gdt.len, 8); gdt } @@ -406,9 +488,9 @@ mod tests { pub fn push_max_segments() { // Make sure we don't panic with user segments let mut gdt = make_six_entry_gdt(); - gdt.add_entry(Descriptor::user_data_segment()); + gdt.append(Descriptor::user_data_segment()); assert_eq!(gdt.len, 7); - gdt.add_entry(Descriptor::user_data_segment()); + gdt.append(Descriptor::user_data_segment()); assert_eq!(gdt.len, 8); // Make sure we don't panic with system segments let _ = make_full_gdt(); @@ -418,16 +500,24 @@ mod tests { #[should_panic] pub fn panic_user_segment() { let mut gdt = make_full_gdt(); - gdt.add_entry(Descriptor::user_data_segment()); + gdt.append(Descriptor::user_data_segment()); } #[test] #[should_panic] pub fn panic_system_segment() { let mut gdt = make_six_entry_gdt(); - gdt.add_entry(Descriptor::user_data_segment()); + gdt.append(Descriptor::user_data_segment()); // We have one free slot, but the GDT requires two - gdt.add_entry(Descriptor::tss_segment(&TSS)); + gdt.append(Descriptor::tss_segment(&TSS)); + } + + #[test] + pub fn from_entries() { + let raw = [0, Flags::KERNEL_CODE64.bits(), Flags::KERNEL_DATA.bits()]; + let gdt = GlobalDescriptorTable::<3>::from_raw_entries(&raw); + assert_eq!(gdt.table.len(), 3); + assert_eq!(gdt.entries().len(), 3); } #[test] diff --git a/src/structures/idt.rs b/src/structures/idt.rs index 1f7381b6a..3952ae96f 100644 --- a/src/structures/idt.rs +++ b/src/structures/idt.rs @@ -20,15 +20,21 @@ //! //! These types are defined for the compatibility with the Nightly Rust build. +use crate::registers::rflags::RFlags; use crate::{PrivilegeLevel, VirtAddr}; use bit_field::BitField; use bitflags::bitflags; use core::fmt; use core::marker::PhantomData; use core::ops::Bound::{Excluded, Included, Unbounded}; -use core::ops::{Deref, Index, IndexMut, RangeBounds}; +use core::ops::{ + Bound, Deref, Index, IndexMut, Range, RangeBounds, RangeFrom, RangeFull, RangeInclusive, + RangeTo, RangeToInclusive, +}; use volatile::Volatile; +use super::gdt::SegmentSelector; + /// An Interrupt Descriptor Table with 256 entries. /// /// The first 32 entries are used for CPU exceptions. These entries can be either accessed through @@ -519,25 +525,21 @@ impl InterruptDescriptorTable { } } - /// Returns a normalized and ranged check slice range from a RangeBounds trait object + /// Returns a normalized and ranged check slice range from a RangeBounds trait object. 
/// - /// Panics if range is outside the range of user interrupts (i.e. greater than 255) or if the entry is an - /// exception - fn condition_slice_bounds(&self, bounds: impl RangeBounds) -> (usize, usize) { + /// Panics if the entry is an exception. + fn condition_slice_bounds(&self, bounds: impl RangeBounds) -> (usize, usize) { let lower_idx = match bounds.start_bound() { - Included(start) => *start, - Excluded(start) => *start + 1, + Included(start) => usize::from(*start), + Excluded(start) => usize::from(*start) + 1, Unbounded => 0, }; let upper_idx = match bounds.end_bound() { - Included(end) => *end + 1, - Excluded(end) => *end, + Included(end) => usize::from(*end) + 1, + Excluded(end) => usize::from(*end), Unbounded => 256, }; - if lower_idx > 256 || upper_idx > 256 { - panic!("Index out of range [{}..{}]", lower_idx, upper_idx); - } if lower_idx < 32 { panic!("Cannot return slice from traps, faults, and exception handlers"); } @@ -546,34 +548,31 @@ impl InterruptDescriptorTable { /// Returns slice of IDT entries with the specified range. /// - /// Panics if range is outside the range of user interrupts (i.e. greater than 255) or if the entry is an - /// exception + /// Panics if the entry is an exception. #[inline] - pub fn slice(&self, bounds: impl RangeBounds) -> &[Entry] { + pub fn slice(&self, bounds: impl RangeBounds) -> &[Entry] { let (lower_idx, upper_idx) = self.condition_slice_bounds(bounds); &self.interrupts[(lower_idx - 32)..(upper_idx - 32)] } /// Returns a mutable slice of IDT entries with the specified range. /// - /// Panics if range is outside the range of user interrupts (i.e. greater than 255) or if the entry is an - /// exception + /// Panics if the entry is an exception. #[inline] - pub fn slice_mut(&mut self, bounds: impl RangeBounds) -> &mut [Entry] { + pub fn slice_mut(&mut self, bounds: impl RangeBounds) -> &mut [Entry] { let (lower_idx, upper_idx) = self.condition_slice_bounds(bounds); &mut self.interrupts[(lower_idx - 32)..(upper_idx - 32)] } } -impl Index for InterruptDescriptorTable { +impl Index for InterruptDescriptorTable { type Output = Entry; /// Returns the IDT entry with the specified index. /// - /// Panics if index is outside the IDT (i.e. greater than 255) or if the entry is an - /// exception that pushes an error code (use the struct fields for accessing these entries). + /// Panics if the entry is an exception that pushes an error code (use the struct fields for accessing these entries). #[inline] - fn index(&self, index: usize) -> &Self::Output { + fn index(&self, index: u8) -> &Self::Output { match index { 0 => &self.divide_error, 1 => &self.debug, @@ -588,24 +587,22 @@ impl Index for InterruptDescriptorTable { 19 => &self.simd_floating_point, 20 => &self.virtualization, 28 => &self.hv_injection_exception, - i @ 32..=255 => &self.interrupts[i - 32], + i @ 32..=255 => &self.interrupts[usize::from(i) - 32], i @ 15 | i @ 31 | i @ 22..=27 => panic!("entry {} is reserved", i), i @ 8 | i @ 10..=14 | i @ 17 | i @ 21 | i @ 29 | i @ 30 => { panic!("entry {} is an exception with error code", i) } i @ 18 => panic!("entry {} is an diverging exception (must not return)", i), - i => panic!("no entry with index {}", i), } } } -impl IndexMut for InterruptDescriptorTable { +impl IndexMut for InterruptDescriptorTable { /// Returns a mutable reference to the IDT entry with the specified index. /// - /// Panics if index is outside the IDT (i.e. 
greater than 255) or if the entry is an - /// exception that pushes an error code (use the struct fields for accessing these entries). + /// Panics if the entry is an exception that pushes an error code (use the struct fields for accessing these entries). #[inline] - fn index_mut(&mut self, index: usize) -> &mut Self::Output { + fn index_mut(&mut self, index: u8) -> &mut Self::Output { match index { 0 => &mut self.divide_error, 1 => &mut self.debug, @@ -620,26 +617,65 @@ impl IndexMut for InterruptDescriptorTable { 19 => &mut self.simd_floating_point, 20 => &mut self.virtualization, 28 => &mut self.hv_injection_exception, - i @ 32..=255 => &mut self.interrupts[i - 32], + i @ 32..=255 => &mut self.interrupts[usize::from(i) - 32], i @ 15 | i @ 31 | i @ 22..=27 => panic!("entry {} is reserved", i), i @ 8 | i @ 10..=14 | i @ 17 | i @ 21 | i @ 29 | i @ 30 => { panic!("entry {} is an exception with error code", i) } i @ 18 => panic!("entry {} is an diverging exception (must not return)", i), - i => panic!("no entry with index {}", i), } } } +macro_rules! impl_index_for_idt { + ($ty:ty) => { + impl Index<$ty> for InterruptDescriptorTable { + type Output = [Entry]; + + /// Returns the IDT entry with the specified index. + /// + /// Panics if index is outside the IDT (i.e. greater than 255) or if the entry is an + /// exception that pushes an error code (use the struct fields for accessing these entries). + #[inline] + fn index(&self, index: $ty) -> &Self::Output { + self.slice(index) + } + } + + impl IndexMut<$ty> for InterruptDescriptorTable { + /// Returns a mutable reference to the IDT entry with the specified index. + /// + /// Panics if the entry is an exception that pushes an error code (use the struct fields for accessing these entries). + #[inline] + fn index_mut(&mut self, index: $ty) -> &mut Self::Output { + self.slice_mut(index) + } + } + }; +} + +// this list was stolen from the list of implementors in https://doc.rust-lang.org/core/ops/trait.RangeBounds.html +impl_index_for_idt!((Bound<&u8>, Bound<&u8>)); +impl_index_for_idt!((Bound, Bound)); +impl_index_for_idt!(Range<&u8>); +impl_index_for_idt!(Range); +impl_index_for_idt!(RangeFrom<&u8>); +impl_index_for_idt!(RangeFrom); +impl_index_for_idt!(RangeInclusive<&u8>); +impl_index_for_idt!(RangeInclusive); +impl_index_for_idt!(RangeTo); +impl_index_for_idt!(RangeTo<&u8>); +impl_index_for_idt!(RangeToInclusive<&u8>); +impl_index_for_idt!(RangeToInclusive); +impl_index_for_idt!(RangeFull); + /// An Interrupt Descriptor Table entry. /// -/// The generic parameter can either be `HandlerFunc` or `HandlerFuncWithErrCode`, depending -/// on the interrupt vector. +/// The generic parameter is some [`InterruptFn`], depending on the interrupt vector. 
#[derive(Clone, Copy)] #[repr(C)] pub struct Entry { pointer_low: u16, - gdt_selector: u16, options: EntryOptions, pointer_middle: u16, pointer_high: u32, @@ -651,7 +687,6 @@ impl fmt::Debug for Entry { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Entry") .field("handler_addr", &format_args!("{:#x}", self.handler_addr())) - .field("gdt_selector", &self.gdt_selector) .field("options", &self.options) .finish() } @@ -660,7 +695,6 @@ impl fmt::Debug for Entry { impl PartialEq for Entry { fn eq(&self, other: &Self) -> bool { self.pointer_low == other.pointer_low - && self.gdt_selector == other.gdt_selector && self.options == other.options && self.pointer_middle == other.pointer_middle && self.pointer_high == other.pointer_high @@ -728,7 +762,6 @@ impl Entry { #[inline] pub const fn missing() -> Self { Entry { - gdt_selector: 0, pointer_low: 0, pointer_middle: 0, pointer_high: 0, @@ -738,10 +771,12 @@ impl Entry { } } - /// Set the handler address for the IDT entry and sets the present bit. - /// - /// For the code selector field, this function uses the code segment selector currently - /// active in the CPU. + /// Sets the handler address for the IDT entry and sets the following defaults: + /// - The code selector is the code segment currently active in the CPU + /// - The present bit is set + /// - Interrupts are disabled on handler invocation + /// - The privilege level (DPL) is [`PrivilegeLevel::Ring0`] + /// - No IST is configured (existing stack will be used) /// /// The function returns a mutable reference to the entry's options that allows /// further customization. @@ -756,13 +791,13 @@ impl Entry { use crate::instructions::segmentation::{Segment, CS}; let addr = addr.as_u64(); - self.pointer_low = addr as u16; self.pointer_middle = (addr >> 16) as u16; self.pointer_high = (addr >> 32) as u32; - self.gdt_selector = CS::get_reg().0; - + self.options = EntryOptions::minimal(); + // SAFETY: The current CS is a valid, long-mode code segment. + unsafe { self.options.set_code_selector(CS::get_reg()) }; self.options.set_present(true); &mut self.options } @@ -781,10 +816,12 @@ impl Entry { #[cfg(feature = "instructions")] impl Entry { - /// Set the handler function for the IDT entry and sets the present bit. - /// - /// For the code selector field, this function uses the code segment selector currently - /// active in the CPU. + /// Sets the handler function for the IDT entry and sets the following defaults: + /// - The code selector is the code segment currently active in the CPU + /// - The present bit is set + /// - Interrupts are disabled on handler invocation + /// - The privilege level (DPL) is [`PrivilegeLevel::Ring0`] + /// - No IST is configured (existing stack will be used) /// /// The function returns a mutable reference to the entry's options that allows /// further customization. @@ -798,7 +835,11 @@ impl Entry { } /// A common trait for all handler functions usable in [`Entry`]. -pub trait HandlerFuncType { +/// +/// # Safety +/// +/// Implementors have to ensure that `to_virt_addr` returns a valid address. +pub unsafe trait HandlerFuncType { /// Get the virtual address of the handler function. fn to_virt_addr(self) -> VirtAddr; } @@ -806,7 +847,7 @@ pub trait HandlerFuncType { macro_rules! 
impl_handler_func_type { ($f:ty) => { #[cfg(feature = "abi_x86_interrupt")] - impl HandlerFuncType for $f { + unsafe impl HandlerFuncType for $f { #[inline] fn to_virt_addr(self) -> VirtAddr { VirtAddr::new(self as u64) @@ -821,38 +862,63 @@ impl_handler_func_type!(PageFaultHandlerFunc); impl_handler_func_type!(DivergingHandlerFunc); impl_handler_func_type!(DivergingHandlerFuncWithErrCode); -/// Represents the options field of an IDT entry. -#[repr(transparent)] +/// Represents the 4 non-offset bytes of an IDT entry. +#[repr(C)] #[derive(Clone, Copy, PartialEq)] -pub struct EntryOptions(u16); +pub struct EntryOptions { + cs: SegmentSelector, + bits: u16, +} impl fmt::Debug for EntryOptions { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("EntryOptions") - .field(&format_args!("{:#06x}", self.0)) + f.debug_struct("EntryOptions") + .field("code_selector", &self.cs) + .field("stack_index", &self.stack_index()) + .field("type", &format_args!("{:#04b}", self.bits.get_bits(8..12))) + .field("privilege_level", &self.privilege_level()) + .field("present", &self.present()) .finish() } } impl EntryOptions { - /// Creates a minimal options field with all the must-be-one bits set. + /// Creates a minimal options field with all the must-be-one bits set. This + /// means the CS selector, IST, and DPL field are all 0. #[inline] const fn minimal() -> Self { - EntryOptions(0b1110_0000_0000) + EntryOptions { + cs: SegmentSelector(0), + bits: 0b1110_0000_0000, // Default to a 64-bit Interrupt Gate + } + } + + /// Set the code segment that will be used by this interrupt. + /// + /// ## Safety + /// This function is unsafe because the caller must ensure that the passed + /// segment selector points to a valid, long-mode code segment. + pub unsafe fn set_code_selector(&mut self, cs: SegmentSelector) -> &mut Self { + self.cs = cs; + self } /// Set or reset the preset bit. #[inline] pub fn set_present(&mut self, present: bool) -> &mut Self { - self.0.set_bit(15, present); + self.bits.set_bit(15, present); self } + fn present(&self) -> bool { + self.bits.get_bit(15) + } + /// Let the CPU disable hardware interrupts when the handler is invoked. By default, /// interrupts are disabled on handler invocation. #[inline] pub fn disable_interrupts(&mut self, disable: bool) -> &mut Self { - self.0.set_bit(8, !disable); + self.bits.set_bit(8, !disable); self } @@ -860,10 +926,14 @@ impl EntryOptions { /// or 3, the default is 0. If CPL < DPL, a general protection fault occurs. #[inline] pub fn set_privilege_level(&mut self, dpl: PrivilegeLevel) -> &mut Self { - self.0.set_bits(13..15, dpl as u16); + self.bits.set_bits(13..15, dpl as u16); self } + fn privilege_level(&self) -> PrivilegeLevel { + PrivilegeLevel::from_u16(self.bits.get_bits(13..15)) + } + /// Assigns a Interrupt Stack Table (IST) stack to this handler. The CPU will then always /// switch to the specified stack before the handler is invoked. This allows kernels to /// recover from corrupt stack pointers (e.g., on kernel stack overflow). @@ -881,9 +951,13 @@ impl EntryOptions { pub unsafe fn set_stack_index(&mut self, index: u16) -> &mut Self { // The hardware IST index starts at 1, but our software IST index // starts at 0. Therefore we need to add 1 here. - self.0.set_bits(0..3, index + 1); + self.bits.set_bits(0..3, index + 1); self } + + fn stack_index(&self) -> u16 { + self.bits.get_bits(0..3) - 1 + } } /// Wrapper type for the interrupt stack frame pushed by the CPU. 
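(Editorial aside, not part of the patch: the reworked `Entry`/`EntryOptions` API above is easiest to see from a call site. The sketch below assumes the `instructions` and `abi_x86_interrupt` features from the default `nightly` set, and the `TIMER_VECTOR` constant and function name are hypothetical.)

use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};

// Hypothetical user-chosen vector; vectors below 32 are CPU exceptions.
const TIMER_VECTOR: u8 = 32;

extern "x86-interrupt" fn timer_handler(_frame: InterruptStackFrame) {}

fn install_timer_handler(idt: &mut InterruptDescriptorTable) {
    // The IDT is now indexed by u8. set_handler_fn fills in the active CS,
    // marks the entry present, disables interrupts on entry, and uses DPL 0 ...
    let options = idt[TIMER_VECTOR].set_handler_fn(timer_handler);
    // ... and the returned &mut EntryOptions can override those defaults,
    // e.g. leave interrupts enabled while this handler runs.
    options.disable_interrupts(false);
}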
@@ -893,10 +967,8 @@ impl EntryOptions { /// This wrapper type ensures that no accidental modification of the interrupt stack frame /// occurs, which can cause undefined behavior (see the [`as_mut`](InterruptStackFrame::as_mut) /// method for more information). -#[repr(C)] -pub struct InterruptStackFrame { - value: InterruptStackFrameValue, -} +#[repr(transparent)] +pub struct InterruptStackFrame(InterruptStackFrameValue); impl InterruptStackFrame { /// Gives mutable access to the contents of the interrupt stack frame. @@ -915,7 +987,7 @@ impl InterruptStackFrame { /// officially supported by LLVM's x86 interrupt calling convention. #[inline] pub unsafe fn as_mut(&mut self) -> Volatile<&mut InterruptStackFrameValue> { - Volatile::new(&mut self.value) + Volatile::new(&mut self.0) } } @@ -924,14 +996,14 @@ impl Deref for InterruptStackFrame { #[inline] fn deref(&self) -> &Self::Target { - &self.value + &self.0 } } impl fmt::Debug for InterruptStackFrame { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.value.fmt(f) + self.0.fmt(f) } } @@ -945,14 +1017,16 @@ pub struct InterruptStackFrameValue { /// this value points to the faulting instruction, so that the instruction is restarted on /// return. See the documentation of the [`InterruptDescriptorTable`] fields for more details. pub instruction_pointer: VirtAddr, - /// The code segment selector, padded with zeros. - pub code_segment: u64, + /// The code segment selector at the time of the interrupt. + pub code_segment: SegmentSelector, + _reserved1: [u8; 6], /// The flags register before the interrupt handler was invoked. - pub cpu_flags: u64, + pub cpu_flags: RFlags, /// The stack pointer at the time of the interrupt. pub stack_pointer: VirtAddr, /// The stack segment descriptor at the time of the interrupt (often zero in 64-bit mode). - pub stack_segment: u64, + pub stack_segment: SegmentSelector, + _reserved2: [u8; 6], } impl InterruptStackFrameValue { @@ -972,17 +1046,17 @@ impl InterruptStackFrameValue { pub unsafe fn iretq(&self) -> ! { unsafe { core::arch::asm!( - "push {stack_segment}", + "push {stack_segment:r}", "push {new_stack_pointer}", "push {rflags}", - "push {code_segment}", + "push {code_segment:r}", "push {new_instruction_pointer}", "iretq", - rflags = in(reg) self.cpu_flags, + rflags = in(reg) self.cpu_flags.bits(), new_instruction_pointer = in(reg) self.instruction_pointer.as_u64(), new_stack_pointer = in(reg) self.stack_pointer.as_u64(), - code_segment = in(reg) self.code_segment, - stack_segment = in(reg) self.stack_segment, + code_segment = in(reg) self.code_segment.0, + stack_segment = in(reg) self.stack_segment.0, options(noreturn) ) } @@ -991,17 +1065,10 @@ impl InterruptStackFrameValue { impl fmt::Debug for InterruptStackFrameValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - struct Hex(u64); - impl fmt::Debug for Hex { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:#x}", self.0) - } - } - let mut s = f.debug_struct("InterruptStackFrame"); s.field("instruction_pointer", &self.instruction_pointer); s.field("code_segment", &self.code_segment); - s.field("cpu_flags", &Hex(self.cpu_flags)); + s.field("cpu_flags", &self.cpu_flags); s.field("stack_pointer", &self.stack_pointer); s.field("stack_segment", &self.stack_segment); s.finish() @@ -1056,14 +1123,6 @@ bitflags! 
{
    }
}

-impl PageFaultErrorCode {
-    #[deprecated = "use the safe `from_bits_retain` method instead"]
-    /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag).
-    pub const unsafe fn from_bits_unchecked(bits: u64) -> Self {
-        Self::from_bits_retain(bits)
-    }
-}
-
/// Describes an error code referencing a segment selector.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
@@ -1425,7 +1484,7 @@ macro_rules! set_general_handler_entry {
        extern "x86-interrupt" fn handler(frame: $crate::structures::idt::InterruptStackFrame) {
            $handler(frame, $idx.into(), None);
        }
-        $idt[$idx as usize].set_handler_fn(handler);
+        $idt[$idx].set_handler_fn(handler);
    }};
}

@@ -1434,7 +1493,7 @@ mod test {
    use super::*;

    #[allow(dead_code)]
-    fn entry_present(idt: &InterruptDescriptorTable, index: usize) -> bool {
+    fn entry_present(idt: &InterruptDescriptorTable, index: u8) -> bool {
        let options = match index {
            8 => &idt.double_fault.options,
            10 => &idt.invalid_tss.options,
@@ -1446,14 +1505,14 @@ mod test {
            17 => &idt.alignment_check.options,
            18 => &idt.machine_check.options,
            21 => &idt.cp_protection_exception.options,
-            i @ 22..=27 => &idt.reserved_2[i - 22].options,
+            i @ 22..=27 => &idt.reserved_2[usize::from(i) - 22].options,
            28 => &idt.hv_injection_exception.options,
            29 => &idt.vmm_communication_exception.options,
            30 => &idt.security_exception.options,
            31 => &idt.reserved_3.options,
            other => &idt[other].options,
        };
-        options.0.get_bit(15)
+        options.bits.get_bit(15)
    }

    #[test]
@@ -1461,6 +1520,8 @@ mod test {
        use core::mem::size_of;
        assert_eq!(size_of::<Entry<HandlerFunc>>(), 16);
        assert_eq!(size_of::<InterruptDescriptorTable>(), 256 * 16);
+        assert_eq!(size_of::<InterruptStackFrame>(), 40);
+        assert_eq!(size_of::<InterruptStackFrameValue>(), 40);
    }

    #[cfg(all(feature = "instructions", feature = "abi_x86_interrupt"))]
@@ -1478,7 +1539,7 @@ mod test {

        let mut idt = InterruptDescriptorTable::new();
        set_general_handler!(&mut idt, general_handler, 0);
-        for i in 0..256 {
+        for i in 0..=255 {
            if i == 0 {
                assert!(entry_present(&idt, i));
            } else {
@@ -1486,7 +1547,7 @@ mod test {
            }
        }
        set_general_handler!(&mut idt, general_handler, 14);
-        for i in 0..256 {
+        for i in 0..=255 {
            if i == 0 || i == 14 {
                assert!(entry_present(&idt, i));
            } else {
@@ -1494,7 +1555,7 @@ mod test {
            }
        }
        set_general_handler!(&mut idt, general_handler, 32..64);
-        for i in 1..256 {
+        for i in 1..=255 {
            if i == 0 || i == 14 || (32..64).contains(&i) {
                assert!(entry_present(&idt, i), "{}", i);
            } else {
@@ -1502,7 +1563,7 @@ mod test {
            }
        }
        set_general_handler!(&mut idt, general_handler);
-        for i in 0..256 {
+        for i in 0..=255 {
            if i == 15 || i == 31 || (22..=27).contains(&i) {
                // reserved entries should not be set
                assert!(!entry_present(&idt, i));
@@ -1518,8 +1579,7 @@ mod test {

        foo(Entry:: {
            pointer_low: 0,
-            gdt_selector: 0,
-            options: EntryOptions(0),
+            options: EntryOptions::minimal(),
            pointer_middle: 0,
            pointer_high: 0,
            reserved: 0,
@@ -1529,15 +1589,15 @@ mod test {

    #[test]
    fn isr_frame_manipulation() {
-        let mut frame = InterruptStackFrame {
-            value: InterruptStackFrameValue {
-                instruction_pointer: VirtAddr::new(0x1000),
-                code_segment: 0,
-                cpu_flags: 0,
-                stack_pointer: VirtAddr::new(0x2000),
-                stack_segment: 0,
-            },
-        };
+        let mut frame = InterruptStackFrame(InterruptStackFrameValue {
+            instruction_pointer: VirtAddr::new(0x1000),
+            code_segment: SegmentSelector(0),
+            cpu_flags: RFlags::empty(),
+            stack_pointer: VirtAddr::new(0x2000),
+            stack_segment: SegmentSelector(0),
+            _reserved1: Default::default(),
+            _reserved2: Default::default(),
+        });
        unsafe {
            frame
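The updated `isr_frame_manipulation` test above also reflects that `InterruptStackFrameValue` now uses typed fields. Inside a handler this removes the need for manual masking of the raw `u64` values; a rough sketch under the same `abi_x86_interrupt` assumption (the handler name is illustrative):

use x86_64::registers::rflags::RFlags;
use x86_64::structures::idt::InterruptStackFrame;

extern "x86-interrupt" fn breakpoint_handler(frame: InterruptStackFrame) {
    // `cpu_flags` is an `RFlags` value and `code_segment`/`stack_segment` are
    // `SegmentSelector`s, so the fields can be queried directly.
    if frame.cpu_flags.contains(RFlags::INTERRUPT_FLAG) {
        // Interrupts were enabled when the exception was raised.
    }
    let _rpl = frame.code_segment.rpl();
}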
diff --git a/src/structures/paging/frame.rs b/src/structures/paging/frame.rs index 64935caee..6cae8faba 100644 --- a/src/structures/paging/frame.rs +++ b/src/structures/paging/frame.rs @@ -86,7 +86,7 @@ impl fmt::Debug for PhysFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_fmt(format_args!( "PhysFrame[{}]({:#x})", - S::SIZE_AS_DEBUG_STR, + S::DEBUG_STR, self.start_address().as_u64() )) } diff --git a/src/structures/paging/mapper/mapped_page_table.rs b/src/structures/paging/mapper/mapped_page_table.rs index 5537fd02b..25e83e123 100644 --- a/src/structures/paging/mapper/mapped_page_table.rs +++ b/src/structures/paging/mapper/mapped_page_table.rs @@ -37,9 +37,14 @@ impl<'a, P: PageTableFrameMapping> MappedPageTable<'a, P> { } } + /// Returns an immutable reference to the wrapped level 4 `PageTable` instance. + pub fn level_4_table(&self) -> &PageTable { + self.level_4_table + } + /// Returns a mutable reference to the wrapped level 4 `PageTable` instance. - pub fn level_4_table(&mut self) -> &mut PageTable { - &mut self.level_4_table + pub fn level_4_table_mut(&mut self) -> &mut PageTable { + self.level_4_table } /// Returns the `PageTableFrameMapping` used for converting virtual to physical addresses. diff --git a/src/structures/paging/mapper/mod.rs b/src/structures/paging/mapper/mod.rs index 057bb46cc..ffd173dd5 100644 --- a/src/structures/paging/mapper/mod.rs +++ b/src/structures/paging/mapper/mod.rs @@ -290,7 +290,7 @@ pub trait Mapper { /// /// This method is unsafe because changing the flags of a mapping /// might result in undefined behavior. For example, setting the - /// `GLOBAL` and `MUTABLE` flags for a page might result in the corruption + /// `GLOBAL` and `WRITABLE` flags for a page might result in the corruption /// of values stored in that page from processes running in other address /// spaces. unsafe fn update_flags( @@ -377,8 +377,8 @@ pub trait Mapper { /// This type represents a page whose mapping has changed in the page table. /// /// The old mapping might be still cached in the translation lookaside buffer (TLB), so it needs -/// to be flushed from the TLB before it's accessed. This type is returned from function that -/// change the mapping of a page to ensure that the TLB flush is not forgotten. +/// to be flushed from the TLB before it's accessed. This type is returned from a function that +/// changed the mapping of a page to ensure that the TLB flush is not forgotten. #[derive(Debug)] #[must_use = "Page Table changes must be flushed or ignored."] pub struct MapperFlush(Page); diff --git a/src/structures/paging/mapper/offset_page_table.rs b/src/structures/paging/mapper/offset_page_table.rs index 7a44132c8..895f0773e 100644 --- a/src/structures/paging/mapper/offset_page_table.rs +++ b/src/structures/paging/mapper/offset_page_table.rs @@ -38,11 +38,16 @@ impl<'a> OffsetPageTable<'a> { } } - /// Returns a mutable reference to the wrapped level 4 `PageTable` instance. - pub fn level_4_table(&mut self) -> &mut PageTable { + /// Returns an immutable reference to the wrapped level 4 `PageTable` instance. + pub fn level_4_table(&self) -> &PageTable { self.inner.level_4_table() } + /// Returns a mutable reference to the wrapped level 4 `PageTable` instance. + pub fn level_4_table_mut(&mut self) -> &mut PageTable { + self.inner.level_4_table_mut() + } + /// Returns the offset used for converting virtual to physical addresses. 
pub fn phys_offset(&self) -> VirtAddr { self.inner.page_table_frame_mapping().offset diff --git a/src/structures/paging/mapper/recursive_page_table.rs b/src/structures/paging/mapper/recursive_page_table.rs index 4e5002215..0053f3251 100644 --- a/src/structures/paging/mapper/recursive_page_table.rs +++ b/src/structures/paging/mapper/recursive_page_table.rs @@ -91,9 +91,14 @@ impl<'a> RecursivePageTable<'a> { } } + /// Returns an immutable reference to the wrapped level 4 `PageTable` instance. + pub fn level_4_table(&self) -> &PageTable { + self.p4 + } + /// Returns a mutable reference to the wrapped level 4 `PageTable` instance. - pub fn level_4_table(&mut self) -> &mut PageTable { - &mut self.p4 + pub fn level_4_table_mut(&mut self) -> &mut PageTable { + self.p4 } /// Internal helper function to create the page table of the next level if needed. @@ -916,7 +921,7 @@ impl<'a> CleanUp for RecursivePageTable<'a> { clean_up( self.recursive_index, - self.level_4_table(), + self.level_4_table_mut(), PageTableLevel::Four, range, frame_deallocator, diff --git a/src/structures/paging/page.rs b/src/structures/paging/page.rs index 88494aefa..a5977e28b 100644 --- a/src/structures/paging/page.rs +++ b/src/structures/paging/page.rs @@ -1,5 +1,6 @@ //! Abstractions for default-sized and huge virtual memory pages. +use crate::sealed::Sealed; use crate::structures::paging::page_table::PageTableLevel; use crate::structures::paging::PageTableIndex; use crate::VirtAddr; @@ -10,12 +11,9 @@ use core::marker::PhantomData; use core::ops::{Add, AddAssign, Sub, SubAssign}; /// Trait for abstracting over the three possible page sizes on x86_64, 4KiB, 2MiB, 1GiB. -pub trait PageSize: Copy + Eq + PartialOrd + Ord { +pub trait PageSize: Copy + Eq + PartialOrd + Ord + Sealed { /// The page size in bytes. const SIZE: u64; - - /// A string representation of the page size for debug output. - const SIZE_AS_DEBUG_STR: &'static str; } /// This trait is implemented for 4KiB and 2MiB pages, but not for 1GiB pages. @@ -37,21 +35,30 @@ pub enum Size1GiB {} impl PageSize for Size4KiB { const SIZE: u64 = 4096; - const SIZE_AS_DEBUG_STR: &'static str = "4KiB"; } impl NotGiantPageSize for Size4KiB {} +impl Sealed for super::Size4KiB { + const DEBUG_STR: &'static str = "4KiB"; +} + impl PageSize for Size2MiB { const SIZE: u64 = Size4KiB::SIZE * 512; - const SIZE_AS_DEBUG_STR: &'static str = "2MiB"; } impl NotGiantPageSize for Size2MiB {} +impl Sealed for super::Size2MiB { + const DEBUG_STR: &'static str = "2MiB"; +} + impl PageSize for Size1GiB { const SIZE: u64 = Size2MiB::SIZE * 512; - const SIZE_AS_DEBUG_STR: &'static str = "1GiB"; +} + +impl Sealed for super::Size1GiB { + const DEBUG_STR: &'static str = "1GiB"; } /// A virtual memory page. 
@@ -187,7 +194,7 @@ impl Page { let mut addr = 0; addr.set_bits(39..48, u64::from(p4_index)); addr.set_bits(30..39, u64::from(p3_index)); - Page::containing_address(VirtAddr::new(addr)) + Page::containing_address(VirtAddr::new_truncate(addr)) } } @@ -205,7 +212,7 @@ impl Page { addr.set_bits(39..48, u64::from(p4_index)); addr.set_bits(30..39, u64::from(p3_index)); addr.set_bits(21..30, u64::from(p2_index)); - Page::containing_address(VirtAddr::new(addr)) + Page::containing_address(VirtAddr::new_truncate(addr)) } } @@ -225,7 +232,7 @@ impl Page { addr.set_bits(30..39, u64::from(p3_index)); addr.set_bits(21..30, u64::from(p2_index)); addr.set_bits(12..21, u64::from(p1_index)); - Page::containing_address(VirtAddr::new(addr)) + Page::containing_address(VirtAddr::new_truncate(addr)) } /// Returns the level 1 page table index of this page. @@ -239,7 +246,7 @@ impl fmt::Debug for Page { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_fmt(format_args!( "Page[{}]({:#x})", - S::SIZE_AS_DEBUG_STR, + S::DEBUG_STR, self.start_address().as_u64() )) } diff --git a/src/structures/paging/page_table.rs b/src/structures/paging/page_table.rs index 0af5350e1..03203ed52 100644 --- a/src/structures/paging/page_table.rs +++ b/src/structures/paging/page_table.rs @@ -169,14 +169,6 @@ bitflags! { } } -impl PageTableFlags { - #[deprecated = "use the safe `from_bits_retain` method instead"] - /// Convert from underlying bit representation, preserving all bits (even those not corresponding to a defined flag). - pub const unsafe fn from_bits_unchecked(bits: u64) -> Self { - Self::from_bits_retain(bits) - } -} - /// The number of entries in a page table. const ENTRY_COUNT: usize = 512; diff --git a/testing/Cargo.toml b/testing/Cargo.toml index fe08cc67c..7a32657c1 100644 --- a/testing/Cargo.toml +++ b/testing/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Philipp Oppermann "] edition = "2018" [[test]] -name = "breakpoint_exception" +name = "interrupt_handling" harness = false [[test]] diff --git a/testing/src/gdt.rs b/testing/src/gdt.rs index 70f4df5cf..28024da9e 100644 --- a/testing/src/gdt.rs +++ b/testing/src/gdt.rs @@ -13,15 +13,17 @@ lazy_static! 
{ static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; let stack_start = VirtAddr::from_ptr(unsafe { &STACK }); - let stack_end = stack_start + STACK_SIZE; + let stack_end = stack_start + STACK_SIZE as u64; stack_end }; tss }; static ref GDT: (GlobalDescriptorTable, Selectors) = { let mut gdt = GlobalDescriptorTable::new(); - let code_selector = gdt.add_entry(Descriptor::kernel_code_segment()); - let tss_selector = gdt.add_entry(Descriptor::tss_segment(&TSS)); + // Add an unused segment so we get a different value for CS + gdt.append(Descriptor::kernel_data_segment()); + let code_selector = gdt.append(Descriptor::kernel_code_segment()); + let tss_selector = gdt.append(Descriptor::tss_segment(&TSS)); ( gdt, Selectors { @@ -38,12 +40,19 @@ struct Selectors { } pub fn init() { - use x86_64::instructions::segmentation::{CS, Segment}; + use x86_64::instructions::segmentation::{Segment, CS}; use x86_64::instructions::tables::load_tss; + // Make sure loading CS actually changes the value GDT.0.load(); - unsafe { - CS::set_reg(GDT.1.code_selector); - load_tss(GDT.1.tss_selector); - } + assert_ne!(CS::get_reg(), GDT.1.code_selector); + unsafe { CS::set_reg(GDT.1.code_selector) }; + assert_eq!(CS::get_reg(), GDT.1.code_selector); + + // Loading the TSS should mark the GDT entry as busy + let tss_idx: usize = GDT.1.tss_selector.index().into(); + let old_tss_entry = GDT.0.entries()[tss_idx].clone(); + unsafe { load_tss(GDT.1.tss_selector) }; + let new_tss_entry = GDT.0.entries()[tss_idx].clone(); + assert_ne!(old_tss_entry, new_tss_entry); } diff --git a/testing/tests/breakpoint_exception.rs b/testing/tests/interrupt_handling.rs similarity index 55% rename from testing/tests/breakpoint_exception.rs rename to testing/tests/interrupt_handling.rs index 573819030..120f95515 100644 --- a/testing/tests/breakpoint_exception.rs +++ b/testing/tests/interrupt_handling.rs @@ -7,7 +7,10 @@ use core::sync::atomic::{AtomicUsize, Ordering}; use lazy_static::lazy_static; use testing::{exit_qemu, serial_print, serial_println, QemuExitCode}; +use x86_64::instructions::interrupts; + static BREAKPOINT_HANDLER_CALLED: AtomicUsize = AtomicUsize::new(0); +static INTERRUPT_HANDLER_CALLED: AtomicUsize = AtomicUsize::new(0); #[no_mangle] pub extern "C" fn _start() -> ! { @@ -16,13 +19,10 @@ pub extern "C" fn _start() -> ! { init_test_idt(); // invoke a breakpoint exception - x86_64::instructions::interrupts::int3(); + interrupts::int3(); match BREAKPOINT_HANDLER_CALLED.load(Ordering::SeqCst) { - 1 => { - serial_println!("[ok]"); - exit_qemu(QemuExitCode::Success); - } + 1 => {} 0 => { serial_println!("[failed]"); serial_println!(" Breakpoint handler was not called."); @@ -35,6 +35,29 @@ pub extern "C" fn _start() -> ! { } } + serial_print!("interrupt 42... "); + unsafe { interrupts::software_interrupt::<42>() }; + serial_print!("interrupt 77... "); + unsafe { interrupts::software_interrupt::<77>() }; + serial_print!("interrupt 42... "); + unsafe { interrupts::software_interrupt::<42>() }; + + match INTERRUPT_HANDLER_CALLED.load(Ordering::SeqCst) { + 3 => {} + 0 => { + serial_println!("[failed]"); + serial_println!(" Interrupt handler was not called."); + exit_qemu(QemuExitCode::Failed); + } + other => { + serial_println!("[failed]"); + serial_println!(" Interrupt handler was called {} times", other); + exit_qemu(QemuExitCode::Failed); + } + } + + serial_println!("[ok]"); + exit_qemu(QemuExitCode::Success); loop {} } @@ -49,6 +72,8 @@ lazy_static! 
{ static ref TEST_IDT: InterruptDescriptorTable = { let mut idt = InterruptDescriptorTable::new(); idt.breakpoint.set_handler_fn(breakpoint_handler); + idt[42].set_handler_fn(interrupt_handler); + idt[77].set_handler_fn(interrupt_handler); idt }; } @@ -60,3 +85,7 @@ pub fn init_test_idt() { extern "x86-interrupt" fn breakpoint_handler(_stack_frame: InterruptStackFrame) { BREAKPOINT_HANDLER_CALLED.fetch_add(1, Ordering::SeqCst); } + +extern "x86-interrupt" fn interrupt_handler(_stack_frame: InterruptStackFrame) { + INTERRUPT_HANDLER_CALLED.fetch_add(1, Ordering::SeqCst); +}
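The renamed test exercises both the new `u8` indexing of the IDT and the generic `software_interrupt` function. Outside the test harness, usage might look roughly like this (the handler and function names are illustrative; the `abi_x86_interrupt` feature is assumed, and the IDT must be loaded before the interrupt is raised):

use x86_64::instructions::interrupts;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};

extern "x86-interrupt" fn vector_42_handler(_frame: InterruptStackFrame) {
    // React to `int 42` here.
}

fn install(idt: &mut InterruptDescriptorTable) {
    // The IDT is indexed with a `u8`, so a vector larger than 255 is rejected
    // at compile time instead of panicking at runtime.
    idt[42].set_handler_fn(vector_42_handler);
}

fn trigger() {
    // The vector is a const generic, so this compiles down to a single
    // `int 42` instruction. It is unsafe because the matching IDT entry must
    // be present and the table loaded when the interrupt fires.
    unsafe { interrupts::software_interrupt::<42>() };
}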