diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7b60dba22..2c75adf9a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -50,21 +50,29 @@ jobs: rustc -Vv cargo -Vv - - name: Cache binaries - id: cache-bin - uses: actions/cache@v1 - with: - path: binaries - key: ${{ runner.OS }}-binaries - - name: Add binaries/bin to PATH - run: echo "$GITHUB_WORKSPACE/binaries/bin" >> $GITHUB_PATH - shell: bash - - name: "Run cargo build" uses: actions-rs/cargo@v1 with: command: build + - name: "Run cargo doc" + uses: actions-rs/cargo@v1 + with: + command: doc + + - name: "Run cargo doc for stable" + uses: actions-rs/cargo@v1 + with: + command: doc + args: --no-default-features --features external_asm,instructions + if: runner.os != 'Windows' + + - name: "Run cargo doc without default features" + uses: actions-rs/cargo@v1 + with: + command: doc + args: --no-default-features + - name: "Run cargo build for stable without instructions" uses: actions-rs/cargo@v1 with: @@ -113,6 +121,35 @@ jobs: cargo build --target i686-unknown-linux-gnu --no-default-features --features nightly cargo build --target thumbv7em-none-eabihf --no-default-features --features nightly + bootloader-test: + name: "Bootloader Integration Test" + + strategy: + fail-fast: false + matrix: + platform: [ + ubuntu-latest, + macos-latest, + windows-latest + ] + + runs-on: ${{ matrix.platform }} + timeout-minutes: 15 + + steps: + - name: "Checkout Repository" + uses: actions/checkout@v1 + + - name: Cache binaries + id: cache-bin + uses: actions/cache@v1 + with: + path: binaries + key: ${{ runner.OS }}-binaries + - name: Add binaries/bin to PATH + run: echo "$GITHUB_WORKSPACE/binaries/bin" >> $GITHUB_PATH + shell: bash + - name: "Install Rustup Components" run: rustup component add rust-src llvm-tools-preview - name: "Install cargo-xbuild" @@ -133,14 +170,10 @@ jobs: HOMEBREW_NO_AUTO_UPDATE: 1 HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: 1 HOMEBREW_NO_INSTALL_CLEANUP: 1 - - name: Install Scoop (Windows) - run: | - Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') - echo "$HOME\scoop\shims" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - if: runner.os == 'Windows' - shell: pwsh - name: Install QEMU (Windows) - run: scoop install qemu + run: | + choco install qemu --version 2021.5.5 + echo "$Env:Programfiles\qemu" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append if: runner.os == 'Windows' shell: pwsh diff --git a/Cargo.toml b/Cargo.toml index 999c11f0a..e22d70a77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,11 +22,11 @@ license = "MIT/Apache-2.0" name = "x86_64" readme = "README.md" repository = "https://github.com/rust-osdev/x86_64" -version = "0.14.4" +version = "0.14.7" edition = "2018" [dependencies] -bit_field = "0.9.0" +bit_field = "0.10.1" bitflags = "1.3.2" volatile = "0.4.4" @@ -37,13 +37,11 @@ cc = { version = "1.0.37", optional = true } default = [ "nightly", "instructions" ] instructions = [] external_asm = [ "cc" ] -nightly = [ "inline_asm", "const_fn", "abi_x86_interrupt" ] +nightly = [ "inline_asm", "const_fn", "abi_x86_interrupt", "doc_cfg" ] inline_asm = [] abi_x86_interrupt = [] const_fn = [] - -[package.metadata.docs.rs] -rustdoc-args = ["--cfg", "docsrs"] +doc_cfg = [] [package.metadata.release] no-dev-version = true diff --git a/Changelog.md b/Changelog.md index ab5e428ad..8f57b26b2 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,38 @@ # Unreleased +# 0.14.7 – 2021-12-18 + +- fix: build error on the 
latest nightly ([#329](https://github.com/rust-osdev/x86_64/pull/329))
+- add `set_general_handler` macro ([#285](https://github.com/rust-osdev/x86_64/pull/285))
+- Derive common traits for number, range and enum types ([#315](https://github.com/rust-osdev/x86_64/pull/315))
+- Add the VMM Communication Exception (`#VC`) to the `InterruptDescriptorTable` ([#313](https://github.com/rust-osdev/x86_64/pull/313))
+- fix: enable manipulation of `InterruptStackFrame` ([#312](https://github.com/rust-osdev/x86_64/pull/312))
+- fix docs for `page_table_index` ([#318](https://github.com/rust-osdev/x86_64/pull/318))
+- Remove redundant alignment check ([#314](https://github.com/rust-osdev/x86_64/pull/314))
+- fix(idt): fix panic messages for `index` and `#VC` ([#321](https://github.com/rust-osdev/x86_64/pull/321))
+- remove `const_assert!` in favor of std's `assert!` ([#326](https://github.com/rust-osdev/x86_64/pull/326))
+- Move bootloader integration test to separate CI job ([#330](https://github.com/rust-osdev/x86_64/pull/330))
+
+# 0.14.6 – 2021-09-20
+
+- New `registers::segmentation` module ([#309](https://github.com/rust-osdev/x86_64/pull/309)), containing:
+  - `instructions::segmentation::{Segment, Segment64, CS, DS, ES, FS, GS, SS}`
+  - `structures::gdt::SegmentSelector`
+  - Old locations still re-export all the types, so this is not a breaking change.
+- Fixes build so that `cargo doc --no-default-features` succeeds.
+
+# 0.14.5 – 2021-09-04
+
+- Add `ExceptionVector` enum and additional flags to `PageFaultErrorCode` ([#303](https://github.com/rust-osdev/x86_64/pull/303))
+- Add `clean_up` and `clean_up_with_filter` methods to deallocate unused page tables ([#264](https://github.com/rust-osdev/x86_64/pull/264))
+- Rename some XCr0 and CR4 flags ([#275](https://github.com/rust-osdev/x86_64/pull/275))
+- Expose `MapperFlush::new` and `MapperFlushAll::new` constructor functions ([#296](https://github.com/rust-osdev/x86_64/pull/296))
+- Use `#[cfg(doc)]` instead of docs.rs-specific cfg flag ([#287](https://github.com/rust-osdev/x86_64/pull/287))
+- Some documentation updates:
+  - Update segment register references in `GDT::load*` method to non-deprecated methods ([#301](https://github.com/rust-osdev/x86_64/pull/301))
+  - Remove a panic note ([#300](https://github.com/rust-osdev/x86_64/pull/300))
+- Update `bit_field` dependency ([#306](https://github.com/rust-osdev/x86_64/pull/306))
+
 # 0.14.4 – 2021-07-19
 
 - Add `instructions::tables::sgdt` ([#279](https://github.com/rust-osdev/x86_64/pull/279))
diff --git a/src/addr.rs b/src/addr.rs
index 1c93f0219..b71d7b63d 100644
--- a/src/addr.rs
+++ b/src/addr.rs
@@ -3,6 +3,7 @@
 use core::fmt;
 use core::ops::{Add, AddAssign, Sub, SubAssign};
 
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::{PageOffset, PageTableIndex};
 use bit_field::BitField;
 
@@ -16,7 +17,7 @@ use bit_field::BitField;
 /// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need
 /// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterion
 /// are called “canonical”. This type guarantees that it always represents a canonical address.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[repr(transparent)]
 pub struct VirtAddr(u64);
 
@@ -29,7 +30,7 @@ pub struct VirtAddr(u64);
 ///
 /// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need
 /// to be zero.
This type guarantees that it always represents a valid physical address. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct PhysAddr(u64); @@ -198,6 +199,12 @@ impl VirtAddr { pub const fn p4_index(self) -> PageTableIndex { PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16) } + + /// Returns the 9-bit level page table index. + #[inline] + pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex { + PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16) + } } impl fmt::Debug for VirtAddr { @@ -537,7 +544,7 @@ impl Sub for PhysAddr { /// feature, the panic message will be "index out of bounds". #[inline] pub const fn align_down(addr: u64, align: u64) -> u64 { - const_assert!(align.is_power_of_two(), "`align` must be a power of two"); + assert!(align.is_power_of_two(), "`align` must be a power of two"); addr & !(align - 1) } @@ -549,7 +556,7 @@ pub const fn align_down(addr: u64, align: u64) -> u64 { /// feature, the panic message will be "index out of bounds". #[inline] pub const fn align_up(addr: u64, align: u64) -> u64 { - const_assert!(align.is_power_of_two(), "`align` must be a power of two"); + assert!(align.is_power_of_two(), "`align` must be a power of two"); let align_mask = align - 1; if addr & align_mask == 0 { addr // already aligned diff --git a/src/asm/asm.s b/src/asm/asm.s index dfe0ff50c..831828a82 100644 --- a/src/asm/asm.s +++ b/src/asm/asm.s @@ -334,3 +334,19 @@ _x86_64_asm_xsetbv: movl %esi, %eax # Second param is the low 32-bits xsetbv # Third param (high 32-bits) is already in %edx retq + +.global _x86_64_asm_write_mxcsr +.p2align 4 +_x86_64_asm_write_mxcsr: + pushq %rdi + ldmxcsr (%rsp) + popq %rdi + retq + +.global _x86_64_asm_read_mxcsr +.p2align 4 +_x86_64_asm_read_mxcsr: + pushq $0 + stmxcsr (%rsp) + popq %rax + retq diff --git a/src/asm/mod.rs b/src/asm/mod.rs index 9ca4e0ca9..6d481dad9 100644 --- a/src/asm/mod.rs +++ b/src/asm/mod.rs @@ -299,4 +299,16 @@ extern "sysv64" { link_name = "_x86_64_asm_xsetbv" )] pub(crate) fn x86_64_asm_xsetbv(xcr: u32, low: u32, high: u32); + + #[cfg_attr( + any(target_env = "gnu", target_env = "musl"), + link_name = "_x86_64_asm_read_mxcsr" + )] + pub(crate) fn x86_64_asm_read_mxcsr() -> u32; + + #[cfg_attr( + any(target_env = "gnu", target_env = "musl"), + link_name = "_x86_64_asm_write_mxcsr" + )] + pub(crate) fn x86_64_asm_write_mxcsr(val: u32); } diff --git a/src/instructions/interrupts.rs b/src/instructions/interrupts.rs index b9b41a714..527f20a74 100644 --- a/src/instructions/interrupts.rs +++ b/src/instructions/interrupts.rs @@ -1,5 +1,8 @@ //! Enabling and disabling interrupts +#[cfg(feature = "inline_asm")] +use core::arch::asm; + /// Returns whether interrupts are enabled. #[inline] pub fn are_enabled() -> bool { @@ -156,7 +159,12 @@ pub fn int3() { /// It can also cause memory/register corruption depending on the interrupt /// implementation (if it expects values/pointers to be passed in registers). 
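The new `page_table_index` method generalizes `p4_index` above: skip the 12-bit page offset, then shift out 9 bits for each level below the requested one and truncate to 9 bits. A standalone sketch of the arithmetic, using plain `u64`/`u8` in place of the crate's wrapper types:

```rust
/// Index into the page table at `level` (1 = P1 ... 4 = P4) that a
/// virtual address selects; mirrors VirtAddr::page_table_index.
fn page_table_index(addr: u64, level: u8) -> u16 {
    ((addr >> 12 >> ((level - 1) * 9)) & 0o777) as u16
}

fn main() {
    let addr = 0x0000_7fff_dead_b000_u64;
    // For level 4 this matches the dedicated p4_index computation,
    // which shifts by 12 + 9 + 9 + 9 = 39 bits.
    assert_eq!(page_table_index(addr, 4), ((addr >> 39) & 0x1ff) as u16);
}
```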
#[cfg(feature = "inline_asm")] -#[cfg_attr(docsrs, doc(cfg(any(feature = "nightly", feature = "inline_asm"))))] +#[cfg_attr( + feature = "doc_cfg", + doc(cfg(any(feature = "nightly", feature = "inline_asm"))) +)] pub unsafe fn software_interrupt() { - asm!("int {num}", num = const NUM, options(nomem, nostack)); + unsafe { + asm!("int {num}", num = const NUM, options(nomem, nostack)); + } } diff --git a/src/instructions/mod.rs b/src/instructions/mod.rs index de1c4c3fa..bd35f2a82 100644 --- a/src/instructions/mod.rs +++ b/src/instructions/mod.rs @@ -9,6 +9,9 @@ pub mod segmentation; pub mod tables; pub mod tlb; +#[cfg(feature = "inline_asm")] +use core::arch::asm; + /// Halts the CPU until the next interrupt arrives. #[inline] pub fn hlt() { @@ -54,7 +57,10 @@ pub fn bochs_breakpoint() { /// Gets the current instruction pointer. Note that this is only approximate as it requires a few /// instructions to execute. #[cfg(feature = "inline_asm")] -#[cfg_attr(docsrs, doc(cfg(any(feature = "nightly", feature = "inline_asm"))))] +#[cfg_attr( + feature = "doc_cfg", + doc(cfg(any(feature = "nightly", feature = "inline_asm"))) +)] #[inline(always)] pub fn read_rip() -> crate::VirtAddr { let rip: u64; diff --git a/src/instructions/port.rs b/src/instructions/port.rs index aa3c40137..b23be4401 100644 --- a/src/instructions/port.rs +++ b/src/instructions/port.rs @@ -1,5 +1,7 @@ //! Access to I/O ports +#[cfg(feature = "inline_asm")] +use core::arch::asm; use core::fmt; use core::marker::PhantomData; @@ -11,11 +13,15 @@ impl PortRead for u8 { #[cfg(feature = "inline_asm")] { let value: u8; - asm!("in al, dx", out("al") value, in("dx") port, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("in al, dx", out("al") value, in("dx") port, options(nomem, nostack, preserves_flags)); + } value } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_read_from_port_u8(port) + unsafe { + crate::asm::x86_64_asm_read_from_port_u8(port) + } } } @@ -25,11 +31,15 @@ impl PortRead for u16 { #[cfg(feature = "inline_asm")] { let value: u16; - asm!("in ax, dx", out("ax") value, in("dx") port, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("in ax, dx", out("ax") value, in("dx") port, options(nomem, nostack, preserves_flags)); + } value } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_read_from_port_u16(port) + unsafe { + crate::asm::x86_64_asm_read_from_port_u16(port) + } } } @@ -39,11 +49,15 @@ impl PortRead for u32 { #[cfg(feature = "inline_asm")] { let value: u32; - asm!("in eax, dx", out("eax") value, in("dx") port, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("in eax, dx", out("eax") value, in("dx") port, options(nomem, nostack, preserves_flags)); + } value } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_read_from_port_u32(port) + unsafe { + crate::asm::x86_64_asm_read_from_port_u32(port) + } } } @@ -51,10 +65,14 @@ impl PortWrite for u8 { #[inline] unsafe fn write_to_port(port: u16, value: u8) { #[cfg(feature = "inline_asm")] - asm!("out dx, al", in("dx") port, in("al") value, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("out dx, al", in("dx") port, in("al") value, options(nomem, nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_to_port_u8(port, value); + unsafe { + crate::asm::x86_64_asm_write_to_port_u8(port, value); + } } } @@ -62,10 +80,14 @@ impl PortWrite for u16 { #[inline] unsafe fn write_to_port(port: u16, value: u16) { #[cfg(feature = "inline_asm")] - asm!("out dx, ax", 
in("dx") port, in("ax") value, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("out dx, ax", in("dx") port, in("ax") value, options(nomem, nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_to_port_u16(port, value); + unsafe { + crate::asm::x86_64_asm_write_to_port_u16(port, value); + } } } @@ -73,10 +95,14 @@ impl PortWrite for u32 { #[inline] unsafe fn write_to_port(port: u16, value: u32) { #[cfg(feature = "inline_asm")] - asm!("out dx, eax", in("dx") port, in("eax") value, options(nomem, nostack, preserves_flags)); + unsafe { + asm!("out dx, eax", in("dx") port, in("eax") value, options(nomem, nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_to_port_u32(port, value); + unsafe { + crate::asm::x86_64_asm_write_to_port_u32(port, value); + } } } @@ -162,7 +188,7 @@ impl PortGeneric { /// safety. #[inline] pub unsafe fn read(&mut self) -> T { - T::read_from_port(self.port) + unsafe { T::read_from_port(self.port) } } } @@ -175,7 +201,7 @@ impl PortGeneric { /// safety. #[inline] pub unsafe fn write(&mut self, value: T) { - T::write_to_port(self.port, value) + unsafe { T::write_to_port(self.port, value) } } } diff --git a/src/instructions/segmentation.rs b/src/instructions/segmentation.rs index b5423095a..fda49eec7 100644 --- a/src/instructions/segmentation.rs +++ b/src/instructions/segmentation.rs @@ -1,63 +1,13 @@ //! Provides functions to read and write segment registers. -#[cfg(docsrs)] -use crate::{ - registers::control::Cr4Flags, - structures::gdt::{Descriptor, GlobalDescriptorTable}, -}; +pub use crate::registers::segmentation::{Segment, Segment64, CS, DS, ES, FS, GS, SS}; use crate::{ registers::model_specific::{FsBase, GsBase, Msr}, structures::gdt::SegmentSelector, VirtAddr, }; - -/// An x86 segment -/// -/// Segment registers on x86 are 16-bit [`SegmentSelector`]s, which index into -/// the [`GlobalDescriptorTable`]. The corresponding GDT entry is used to -/// configure the segment itself. Note that most segmentation functionality is -/// disabled in 64-bit mode. See the individual segments for more information. -pub trait Segment { - /// Returns the current value of the segment register. - fn get_reg() -> SegmentSelector; - /// Reload the segment register. Depending on the segment, this may also - /// reconfigure the corresponding segment. - /// - /// ## Safety - /// - /// This function is unsafe because the caller must ensure that `sel` - /// is a valid segment descriptor, and that reconfiguring the segment will - /// not cause undefined behavior. - unsafe fn set_reg(sel: SegmentSelector); -} - -/// An x86 segment which is actually used in 64-bit mode -/// -/// While most segments are unused in 64-bit mode, the FS and GS segments are -/// still partially used. Only the 64-bit segment base address is used, and this -/// address can be set via the GDT, or by using the `FSGSBASE` instructions. -pub trait Segment64: Segment { - /// MSR containing the segment base. This MSR can be used to set the base - /// when [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is **not** set. - const BASE: Msr; - /// Reads the segment base address - /// - /// ## Exceptions - /// - /// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`. - fn read_base() -> VirtAddr; - /// Writes the segment base address - /// - /// ## Exceptions - /// - /// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`. 
- /// - /// ## Safety - /// - /// The caller must ensure that this write operation has no unsafe side - /// effects, as the segment base address might be in use. - unsafe fn write_base(base: VirtAddr); -} +#[cfg(feature = "inline_asm")] +use core::arch::asm; macro_rules! get_reg_impl { ($name:literal, $asm_get:ident) => { @@ -83,10 +33,14 @@ macro_rules! segment_impl { unsafe fn set_reg(sel: SegmentSelector) { #[cfg(feature = "inline_asm")] - asm!(concat!("mov ", $name, ", {0:x}"), in(reg) sel.0, options(nostack, preserves_flags)); + unsafe { + asm!(concat!("mov ", $name, ", {0:x}"), in(reg) sel.0, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::$asm_load(sel.0); + unsafe{ + crate::asm::$asm_load(sel.0); + } } } }; @@ -111,23 +65,19 @@ macro_rules! segment64_impl { unsafe fn write_base(base: VirtAddr) { #[cfg(feature = "inline_asm")] - asm!(concat!("wr", $name, "base {}"), in(reg) base.as_u64(), options(nostack, preserves_flags)); + unsafe{ + asm!(concat!("wr", $name, "base {}"), in(reg) base.as_u64(), options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::$asm_wr(base.as_u64()); + unsafe{ + crate::asm::$asm_wr(base.as_u64()); + } } } }; } -/// Code Segment -/// -/// The segment base and limit are unused in 64-bit mode. Only the L (long), D -/// (default operation size), and DPL (descriptor privilege-level) fields of the -/// descriptor are recognized. So changing the segment register can be used to -/// change privilege level or enable/disable long mode. -#[derive(Debug)] -pub struct CS; impl Segment for CS { get_reg_impl!("cs", x86_64_asm_get_cs); @@ -141,66 +91,31 @@ impl Segment for CS { /// for 64-bit far calls/jumps in long-mode, AMD does not. unsafe fn set_reg(sel: SegmentSelector) { #[cfg(feature = "inline_asm")] - asm!( - "push {sel}", - "lea {tmp}, [1f + rip]", - "push {tmp}", - "retfq", - "1:", - sel = in(reg) u64::from(sel.0), - tmp = lateout(reg) _, - options(preserves_flags), - ); + unsafe { + asm!( + "push {sel}", + "lea {tmp}, [1f + rip]", + "push {tmp}", + "retfq", + "1:", + sel = in(reg) u64::from(sel.0), + tmp = lateout(reg) _, + options(preserves_flags), + ); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_set_cs(u64::from(sel.0)); + unsafe { + crate::asm::x86_64_asm_set_cs(u64::from(sel.0)); + } } } -/// Stack Segment -/// -/// Entirely unused in 64-bit mode; setting the segment register does nothing. -/// However, in ring 3, the SS register still has to point to a valid -/// [`Descriptor`] (it cannot be zero). This means a user-mode read/write -/// segment descriptor must be present in the GDT. -/// -/// This register is also set by the `syscall`/`sysret` and -/// `sysenter`/`sysexit` instructions (even on 64-bit transitions). This is to -/// maintain symmetry with 32-bit transitions where setting SS actually will -/// actually have an effect. -#[derive(Debug)] -pub struct SS; segment_impl!(SS, "ss", x86_64_asm_get_ss, x86_64_asm_load_ss); - -/// Data Segment -/// -/// Entirely unused in 64-bit mode; setting the segment register does nothing. -#[derive(Debug)] -pub struct DS; segment_impl!(DS, "ds", x86_64_asm_get_ds, x86_64_asm_load_ds); - -/// ES Segment -/// -/// Entirely unused in 64-bit mode; setting the segment register does nothing. -#[derive(Debug)] -pub struct ES; segment_impl!(ES, "es", x86_64_asm_get_es, x86_64_asm_load_es); - -/// FS Segment -/// -/// Only base is used in 64-bit mode, see [`Segment64`]. 
This is often used in -/// user-mode for Thread-Local Storage (TLS). -#[derive(Debug)] -pub struct FS; segment_impl!(FS, "fs", x86_64_asm_get_fs, x86_64_asm_load_fs); segment64_impl!(FS, "fs", FsBase, x86_64_asm_rdfsbase, x86_64_asm_wrfsbase); - -/// GS Segment -/// -/// Only base is used in 64-bit mode, see [`Segment64`]. In kernel-mode, the GS -/// base often points to a per-cpu kernel data structure. -#[derive(Debug)] -pub struct GS; segment_impl!(GS, "gs", x86_64_asm_get_gs, x86_64_asm_load_gs); segment64_impl!(GS, "gs", GsBase, x86_64_asm_rdgsbase, x86_64_asm_wrgsbase); @@ -213,10 +128,14 @@ impl GS { /// swap operation cannot lead to undefined behavior. pub unsafe fn swap() { #[cfg(feature = "inline_asm")] - asm!("swapgs", options(nostack, preserves_flags)); + unsafe { + asm!("swapgs", options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_swapgs(); + unsafe { + crate::asm::x86_64_asm_swapgs(); + } } } @@ -225,49 +144,49 @@ impl GS { #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn set_cs(sel: SegmentSelector) { - CS::set_reg(sel) + unsafe { CS::set_reg(sel) } } /// Alias for [`SS::set_reg()`] #[deprecated(since = "0.14.4", note = "use `SS::set_reg()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn load_ss(sel: SegmentSelector) { - SS::set_reg(sel) + unsafe { SS::set_reg(sel) } } /// Alias for [`DS::set_reg()`] #[deprecated(since = "0.14.4", note = "use `DS::set_reg()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn load_ds(sel: SegmentSelector) { - DS::set_reg(sel) + unsafe { DS::set_reg(sel) } } /// Alias for [`ES::set_reg()`] #[deprecated(since = "0.14.4", note = "use `ES::set_reg()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn load_es(sel: SegmentSelector) { - ES::set_reg(sel) + unsafe { ES::set_reg(sel) } } /// Alias for [`FS::set_reg()`] #[deprecated(since = "0.14.4", note = "use `FS::set_reg()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn load_fs(sel: SegmentSelector) { - FS::set_reg(sel) + unsafe { FS::set_reg(sel) } } /// Alias for [`GS::set_reg()`] #[deprecated(since = "0.14.4", note = "use `GS::set_reg()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn load_gs(sel: SegmentSelector) { - GS::set_reg(sel) + unsafe { GS::set_reg(sel) } } /// Alias for [`GS::swap()`] #[deprecated(since = "0.14.4", note = "use `GS::swap()` instead")] #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn swap_gs() { - GS::swap() + unsafe { GS::swap() } } /// Alias for [`CS::get_reg()`] #[deprecated(since = "0.14.4", note = "use `CS::get_reg()` instead")] @@ -283,7 +202,7 @@ pub fn cs() -> SegmentSelector { #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn wrfsbase(val: u64) { - FS::write_base(VirtAddr::new(val)) + unsafe { FS::write_base(VirtAddr::new(val)) } } /// Alias for [`FS::read_base()`] #[deprecated(since = "0.14.4", note = "use `FS::read_base()` instead")] @@ -299,7 +218,7 @@ pub unsafe fn rdfsbase() -> u64 { #[allow(clippy::missing_safety_doc)] #[inline] pub unsafe fn wrgsbase(val: u64) { - GS::write_base(VirtAddr::new(val)) + unsafe { GS::write_base(VirtAddr::new(val)) } } /// Alias for [`GS::read_base()`] #[deprecated(since = "0.14.4", note = "use `GS::read_base()` instead")] diff --git a/src/instructions/tables.rs b/src/instructions/tables.rs index 4d8c7fe61..1b89d22f8 100644 --- a/src/instructions/tables.rs +++ b/src/instructions/tables.rs @@ -2,6 +2,8 @@ use 
crate::structures::gdt::SegmentSelector; use crate::VirtAddr; +#[cfg(feature = "inline_asm")] +use core::arch::asm; pub use crate::structures::DescriptorTablePointer; @@ -19,10 +21,14 @@ pub use crate::structures::DescriptorTablePointer; #[inline] pub unsafe fn lgdt(gdt: &DescriptorTablePointer) { #[cfg(feature = "inline_asm")] - asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags)); + unsafe { + asm!("lgdt [{}]", in(reg) gdt, options(readonly, nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_lgdt(gdt as *const _); + unsafe { + crate::asm::x86_64_asm_lgdt(gdt as *const _); + } } /// Load an IDT. @@ -39,10 +45,14 @@ pub unsafe fn lgdt(gdt: &DescriptorTablePointer) { #[inline] pub unsafe fn lidt(idt: &DescriptorTablePointer) { #[cfg(feature = "inline_asm")] - asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags)); + unsafe { + asm!("lidt [{}]", in(reg) idt, options(readonly, nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_lidt(idt as *const _); + unsafe { + crate::asm::x86_64_asm_lidt(idt as *const _); + } } /// Get the address of the current GDT. @@ -92,8 +102,12 @@ pub fn sidt() -> DescriptorTablePointer { #[inline] pub unsafe fn load_tss(sel: SegmentSelector) { #[cfg(feature = "inline_asm")] - asm!("ltr {0:x}", in(reg) sel.0, options(nostack, preserves_flags)); + unsafe { + asm!("ltr {0:x}", in(reg) sel.0, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_ltr(sel.0); + unsafe { + crate::asm::x86_64_asm_ltr(sel.0); + } } diff --git a/src/instructions/tlb.rs b/src/instructions/tlb.rs index b01afb26a..27814072b 100644 --- a/src/instructions/tlb.rs +++ b/src/instructions/tlb.rs @@ -3,6 +3,8 @@ use core::fmt; use crate::VirtAddr; +#[cfg(feature = "inline_asm")] +use core::arch::asm; /// Invalidate the given address in the TLB using the `invlpg` instruction. #[inline] @@ -51,7 +53,7 @@ struct InvpcidDescriptor { /// Structure of a PCID. A PCID has to be <= 4096 for x86_64. #[repr(transparent)] -#[derive(Debug)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Pcid(u16); impl Pcid { @@ -86,6 +88,7 @@ impl fmt::Display for PcidTooBig { /// Invalidate the given address in the TLB using the `invpcid` instruction. /// /// ## Safety +/// /// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1. #[inline] pub unsafe fn flush_pcid(command: InvPicdCommand) { @@ -110,8 +113,12 @@ pub unsafe fn flush_pcid(command: InvPicdCommand) { } #[cfg(feature = "inline_asm")] - asm!("invpcid {0}, [{1}]", in(reg) kind, in(reg) &desc, options(nostack, preserves_flags)); + unsafe { + asm!("invpcid {0}, [{1}]", in(reg) kind, in(reg) &desc, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_invpcid(kind, &desc as *const _ as u64); + unsafe { + crate::asm::x86_64_asm_invpcid(kind, &desc as *const _ as u64); + } } diff --git a/src/lib.rs b/src/lib.rs index 752ca3e55..0ee7b0b80 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,16 +2,16 @@ //! and access to various system registers. 
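For context on the `tlb` module touched above: after a page-table entry changes, the stale translation must be dropped from the TLB. A hedged usage sketch of the crate's `invlpg` wrapper; the call is only meaningful in ring 0:

```rust
use x86_64::instructions::tlb;
use x86_64::VirtAddr;

/// Invalidate the cached translation for one page after remapping it.
fn flush_after_remap(page_start: VirtAddr) {
    tlb::flush(page_start); // executes `invlpg` for this address
}
```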
#![cfg_attr(not(test), no_std)] -#![cfg_attr(feature = "const_fn", feature(const_panic))] // Better panic messages #![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT add_entry() #![cfg_attr(feature = "const_fn", feature(const_fn_fn_ptr_basics))] // IDT new() #![cfg_attr(feature = "const_fn", feature(const_fn_trait_bound))] // PageSize marker trait #![cfg_attr(feature = "inline_asm", feature(asm))] #![cfg_attr(feature = "inline_asm", feature(asm_const))] // software_interrupt #![cfg_attr(feature = "abi_x86_interrupt", feature(abi_x86_interrupt))] -#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(feature = "doc_cfg", feature(doc_cfg))] #![warn(missing_docs)] #![deny(missing_debug_implementations)] +#![deny(unsafe_op_in_unsafe_fn)] use core::cell::UnsafeCell; use core::sync::atomic::{AtomicBool, Ordering}; @@ -49,19 +49,6 @@ macro_rules! const_fn { }; } -// Helper method for assert! in const fn. Uses out of bounds indexing if an -// assertion fails and the "const_fn" feature is not enabled. -#[cfg(feature = "const_fn")] -macro_rules! const_assert { - ($cond:expr, $($arg:tt)+) => { assert!($cond, $($arg)*) }; -} -#[cfg(not(feature = "const_fn"))] -macro_rules! const_assert { - ($cond:expr, $($arg:tt)+) => { - [(); 1][!($cond as bool) as usize] - }; -} - #[cfg(all(feature = "instructions", feature = "external_asm"))] pub(crate) mod asm; @@ -71,7 +58,7 @@ pub mod registers; pub mod structures; /// Represents a protection ring level. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] #[repr(u8)] pub enum PrivilegeLevel { /// Privilege-level 0 (most privilege): This level is used by critical system-software diff --git a/src/registers/control.rs b/src/registers/control.rs index c55b55871..bd2b06316 100644 --- a/src/registers/control.rs +++ b/src/registers/control.rs @@ -1,9 +1,6 @@ //! Functions to read and write control registers. pub use super::model_specific::{Efer, EferFlags}; -#[cfg(docsrs)] -use crate::{registers::rflags::RFlags, structures::paging::PageTableFlags}; - use bitflags::bitflags; /// Various control flags modifying the basic operation of the CPU. @@ -164,6 +161,8 @@ bitflags! { mod x86_64 { use super::*; use crate::{instructions::tlb::Pcid, structures::paging::PhysFrame, PhysAddr, VirtAddr}; + #[cfg(feature = "inline_asm")] + use core::arch::asm; impl Cr0 { /// Read the current set of CR0 flags. @@ -203,7 +202,9 @@ mod x86_64 { let reserved = old_value & !(Cr0Flags::all().bits()); let new_value = reserved | flags.bits(); - Self::write_raw(new_value); + unsafe { + Self::write_raw(new_value); + } } /// Write raw CR0 flags. @@ -217,10 +218,14 @@ mod x86_64 { #[inline] pub unsafe fn write_raw(value: u64) { #[cfg(feature = "inline_asm")] - asm!("mov cr0, {}", in(reg) value, options(nostack, preserves_flags)); + unsafe { + asm!("mov cr0, {}", in(reg) value, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_cr0(value); + unsafe { + crate::asm::x86_64_asm_write_cr0(value); + } } /// Updates CR0 flags. @@ -238,7 +243,9 @@ mod x86_64 { { let mut flags = Self::read(); f(&mut flags); - Self::write(flags); + unsafe { + Self::write(flags); + } } } @@ -246,6 +253,12 @@ mod x86_64 { /// Read the current page fault linear address from the CR2 register. #[inline] pub fn read() -> VirtAddr { + VirtAddr::new(Self::read_raw()) + } + + /// Read the current page fault linear address from the CR2 register as a raw `u64`. 
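The new `Cr2::read_raw` complements the typed reader used in page-fault handlers. A sketch of the typical call site, assuming the `abi_x86_interrupt` nightly feature and this crate's handler signatures:

```rust
#![feature(abi_x86_interrupt)] // nightly, matching the crate feature

use x86_64::registers::control::Cr2;
use x86_64::structures::idt::{InterruptStackFrame, PageFaultErrorCode};

/// On a page fault, CR2 holds the faulting linear address.
extern "x86-interrupt" fn page_fault_handler(
    _frame: InterruptStackFrame,
    error_code: PageFaultErrorCode,
) {
    let addr = Cr2::read(); // or Cr2::read_raw() for the bare u64
    panic!("page fault at {:?}: {:?}", addr, error_code);
}
```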
+ #[inline] + pub fn read_raw() -> u64 { let value: u64; #[cfg(feature = "inline_asm")] @@ -257,7 +270,7 @@ mod x86_64 { value = crate::asm::x86_64_asm_read_cr2(); } - VirtAddr::new(value) + value } } @@ -301,27 +314,34 @@ mod x86_64 { /// Write a new P4 table address into the CR3 register. /// /// ## Safety + /// /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by /// changing the page mapping. #[inline] pub unsafe fn write(frame: PhysFrame, flags: Cr3Flags) { - Cr3::write_raw(frame, flags.bits() as u16); + unsafe { + Cr3::write_raw(frame, flags.bits() as u16); + } } /// Write a new P4 table address into the CR3 register. /// /// ## Safety + /// /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by /// changing the page mapping. /// [`Cr4Flags::PCID`] must be set before calling this method. #[inline] pub unsafe fn write_pcid(frame: PhysFrame, pcid: Pcid) { - Cr3::write_raw(frame, pcid.value()); + unsafe { + Cr3::write_raw(frame, pcid.value()); + } } /// Write a new P4 table address into the CR3 register. /// /// ## Safety + /// /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by /// changing the page mapping. #[inline] @@ -330,10 +350,14 @@ mod x86_64 { let value = addr.as_u64() | val as u64; #[cfg(feature = "inline_asm")] - asm!("mov cr3, {}", in(reg) value, options(nostack, preserves_flags)); + unsafe { + asm!("mov cr3, {}", in(reg) value, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_cr3(value) + unsafe { + crate::asm::x86_64_asm_write_cr3(value) + } } } @@ -376,7 +400,9 @@ mod x86_64 { let reserved = old_value & !(Cr4Flags::all().bits()); let new_value = reserved | flags.bits(); - Self::write_raw(new_value); + unsafe { + Self::write_raw(new_value); + } } /// Write raw CR4 flags. @@ -391,15 +417,20 @@ mod x86_64 { #[inline] pub unsafe fn write_raw(value: u64) { #[cfg(feature = "inline_asm")] - asm!("mov cr4, {}", in(reg) value, options(nostack, preserves_flags)); + unsafe { + asm!("mov cr4, {}", in(reg) value, options(nostack, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_cr4(value); + unsafe { + crate::asm::x86_64_asm_write_cr4(value); + } } /// Updates CR4 flags. /// /// Preserves the value of reserved fields. + /// /// ## Safety /// /// This function is unsafe because it's possible to violate memory @@ -412,7 +443,9 @@ mod x86_64 { { let mut flags = Self::read(); f(&mut flags); - Self::write(flags); + unsafe { + Self::write(flags); + } } } } diff --git a/src/registers/mod.rs b/src/registers/mod.rs index 27762f9dc..68ab57081 100644 --- a/src/registers/mod.rs +++ b/src/registers/mod.rs @@ -2,7 +2,9 @@ pub mod control; pub mod model_specific; +pub mod mxcsr; pub mod rflags; +pub mod segmentation; pub mod xcontrol; #[cfg(feature = "instructions")] diff --git a/src/registers/model_specific.rs b/src/registers/model_specific.rs index 6c7a0510b..099d2f471 100644 --- a/src/registers/model_specific.rs +++ b/src/registers/model_specific.rs @@ -1,12 +1,9 @@ //! Functions to read and write model specific registers. -#[cfg(docsrs)] -use crate::{ - instructions::segmentation::{Segment64, FS, GS}, - registers::control::Cr4Flags, -}; - use bitflags::bitflags; +// imports for intra doc links +#[cfg(doc)] +use crate::registers::segmentation::{FS, GS}; /// A model specific register. 
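A usage sketch for the raw `Msr` wrapper defined here; the MSR number is `IA32_TSC_AUX` (0xC000_0103), picked purely for illustration:

```rust
use x86_64::registers::model_specific::Msr;

const IA32_TSC_AUX: Msr = Msr::new(0xC000_0103);

/// Read-modify-write of an arbitrary MSR. Unsafe because writing the
/// wrong value to the wrong MSR can break memory safety outright.
unsafe fn bump_tsc_aux() {
    let mut msr = IA32_TSC_AUX;
    unsafe {
        let value = msr.read();
        msr.write(value + 1);
    }
}
```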
#[derive(Debug)] @@ -30,13 +27,19 @@ pub struct FsBase; /// [GS].Base Model Specific Register. /// -/// [`GS::swap`] swaps this register with [`KernelGsBase`]. +#[cfg_attr( + feature = "instructions", + doc = "[`GS::swap`] swaps this register with [`KernelGsBase`]." +)] #[derive(Debug)] pub struct GsBase; /// KernelGsBase Model Specific Register. /// -/// [`GS::swap`] swaps this register with [`GsBase`]. +#[cfg_attr( + feature = "instructions", + doc = "[`GS::swap`] swaps this register with [`GsBase`]." +)] #[derive(Debug)] pub struct KernelGsBase; @@ -119,6 +122,14 @@ mod x86_64 { use bit_field::BitField; use core::convert::TryInto; use core::fmt; + // imports for intra doc links + #[cfg(doc)] + use crate::registers::{ + control::Cr4Flags, + segmentation::{Segment, Segment64, CS, SS}, + }; + #[cfg(feature = "inline_asm")] + use core::arch::asm; impl Msr { /// Read 64 bits msr register. @@ -132,17 +143,21 @@ mod x86_64 { #[cfg(feature = "inline_asm")] { let (high, low): (u32, u32); - asm!( - "rdmsr", - in("ecx") self.0, - out("eax") low, out("edx") high, - options(nomem, nostack, preserves_flags), - ); + unsafe { + asm!( + "rdmsr", + in("ecx") self.0, + out("eax") low, out("edx") high, + options(nomem, nostack, preserves_flags), + ); + } ((high as u64) << 32) | (low as u64) } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_rdmsr(self.0) + unsafe { + crate::asm::x86_64_asm_rdmsr(self.0) + } } /// Write 64 bits to msr register. @@ -157,15 +172,19 @@ mod x86_64 { let high = (value >> 32) as u32; #[cfg(feature = "inline_asm")] - asm!( - "wrmsr", - in("ecx") self.0, - in("eax") low, in("edx") high, - options(nostack, preserves_flags), - ); + unsafe { + asm!( + "wrmsr", + in("ecx") self.0, + in("eax") low, in("edx") high, + options(nostack, preserves_flags), + ); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_wrmsr(self.0, low, high); + unsafe { + crate::asm::x86_64_asm_wrmsr(self.0, low, high); + } } } @@ -196,7 +215,9 @@ mod x86_64 { let reserved = old_value & !(EferFlags::all().bits()); let new_value = reserved | flags.bits(); - Self::write_raw(new_value); + unsafe { + Self::write_raw(new_value); + } } /// Write the EFER flags. @@ -210,7 +231,9 @@ mod x86_64 { #[inline] pub unsafe fn write_raw(flags: u64) { let mut msr = Self::MSR; - msr.write(flags); + unsafe { + msr.write(flags); + } } /// Update EFER flags. @@ -228,7 +251,9 @@ mod x86_64 { { let mut flags = Self::read(); f(&mut flags); - Self::write(flags); + unsafe { + Self::write(flags); + } } } @@ -353,7 +378,9 @@ mod x86_64 { msr_value.set_bits(48..64, sysret.into()); msr_value.set_bits(32..48, syscall.into()); let mut msr = Self::MSR; - msr.write(msr_value); + unsafe { + msr.write(msr_value); + } } /// Write the Ring 0 and Ring 3 segment bases. diff --git a/src/registers/mxcsr.rs b/src/registers/mxcsr.rs new file mode 100644 index 000000000..f10812b31 --- /dev/null +++ b/src/registers/mxcsr.rs @@ -0,0 +1,121 @@ +//! Functions to read and write MXCSR register. + +#[cfg(feature = "instructions")] +pub use self::x86_64::*; + +use bitflags::bitflags; + +bitflags! { + /// MXCSR register. 
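A usage sketch for the new `mxcsr` module introduced here; its `read`/`write` wrappers follow below, gated on the `instructions` feature. Masking every exception kind matches the documented reset state; flush-to-zero is added on top purely as an example:

```rust
use x86_64::registers::mxcsr::{self, MxCsr};

fn init_sse_control() {
    // Reset value 0x1F80: all SIMD floating-point exceptions masked.
    let mut value = MxCsr::default();
    value.insert(MxCsr::FLUSH_TO_ZERO);
    mxcsr::write(value);
    assert_eq!(mxcsr::read(), value);
}
```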
+ #[repr(transparent)] + pub struct MxCsr: u32 { + /// Invalid operation + const INVALID_OPERATION = 1 << 0; + /// Denormal + const DENORMAL = 1 << 1; + /// Divide-by-zero + const DIVIDE_BY_ZERO = 1 << 2; + /// Overflow + const OVERFLOW = 1 << 3; + /// Underflow + const UNDERFLOW = 1 << 4; + /// Precision + const PRECISION = 1 << 5; + /// Denormals are zeros + const DENORMALS_ARE_ZEROS = 1 << 6; + /// Invalid operation mask + const INVALID_OPERATION_MASK = 1 << 7; + /// Denormal mask + const DENORMAL_MASK = 1 << 8; + /// Divide-by-zero mask + const DIVIDE_BY_ZERO_MASK = 1 << 9; + /// Overflow mask + const OVERFLOW_MASK = 1 << 10; + /// Underflow mask + const UNDERFLOW_MASK = 1 << 11; + /// Precision mask + const PRECISION_MASK = 1 << 12; + /// Toward negative infinity + const ROUNDING_CONTROL_NEGATIVE = 1 << 13; + /// Toward positive infinity + const ROUNDING_CONTROL_POSITIVE = 1 << 14; + /// Toward zero (positive + negative) + const ROUNDING_CONTROL_ZERO = 3 << 13; + /// Flush to zero + const FLUSH_TO_ZERO = 1 << 15; + } +} + +impl Default for MxCsr { + /// Return the default MXCSR value at reset, as documented in Intel SDM volume 2A. + #[inline] + fn default() -> Self { + MxCsr::INVALID_OPERATION_MASK + | MxCsr::DENORMAL_MASK + | MxCsr::DIVIDE_BY_ZERO_MASK + | MxCsr::OVERFLOW_MASK + | MxCsr::UNDERFLOW_MASK + | MxCsr::PRECISION_MASK + } +} + +#[cfg(feature = "instructions")] +mod x86_64 { + use super::*; + #[cfg(feature = "inline_asm")] + use core::arch::asm; + + /// Read the value of MXCSR. + #[inline] + pub fn read() -> MxCsr { + #[cfg(feature = "inline_asm")] + { + let mut mxcsr: u32 = 0; + unsafe { + asm!("stmxcsr [{}]", in(reg) &mut mxcsr, options(nostack, preserves_flags)); + } + MxCsr::from_bits_truncate(mxcsr) + } + #[cfg(not(feature = "inline_asm"))] + unsafe { + MxCsr::from_bits_truncate(crate::asm::x86_64_asm_read_mxcsr()) + } + } + + /// Write MXCSR. + #[inline] + pub fn write(mxcsr: MxCsr) { + #[cfg(feature = "inline_asm")] + unsafe { + asm!("ldmxcsr [{}]", in(reg) &mxcsr, options(nostack, readonly)); + } + #[cfg(not(feature = "inline_asm"))] + unsafe { + crate::asm::x86_64_asm_write_mxcsr(mxcsr.bits()); + } + } + + #[cfg(test)] + mod test { + use crate::registers::mxcsr::*; + + #[test] + fn mxcsr_default() { + let mxcsr = read(); + assert_eq!(mxcsr, MxCsr::from_bits_truncate(0x1F80)); + } + + #[test] + fn mxcsr_read() { + let mxcsr = read(); + assert_eq!(mxcsr, MxCsr::default()); + } + + #[test] + fn mxcsr_write() { + let mxcsr = read(); + write(mxcsr); + assert_eq!(mxcsr, read()); + } + } +} diff --git a/src/registers/rflags.rs b/src/registers/rflags.rs index fff539a5e..dad0b51ce 100644 --- a/src/registers/rflags.rs +++ b/src/registers/rflags.rs @@ -66,6 +66,8 @@ bitflags! { #[cfg(feature = "instructions")] mod x86_64 { use super::*; + #[cfg(feature = "inline_asm")] + use core::arch::asm; /// Returns the current value of the RFLAGS register. /// @@ -105,7 +107,9 @@ mod x86_64 { let reserved = old_value & !(RFlags::all().bits()); let new_value = reserved | flags.bits(); - write_raw(new_value); + unsafe { + write_raw(new_value); + } } /// Writes the RFLAGS register. @@ -123,10 +127,14 @@ mod x86_64 { // HACK: we mark this function as preserves_flags to prevent Rust from restoring // saved flags after the "popf" below. See above note on safety. 
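The `rflags` module pairs the safe `read` with the unsafe writers shown here. A minimal sketch of the common query case:

```rust
use x86_64::registers::rflags::{self, RFlags};

/// Check whether interrupts are currently enabled via RFLAGS.IF.
fn interrupts_enabled() -> bool {
    rflags::read().contains(RFlags::INTERRUPT_FLAG)
}
```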
#[cfg(feature = "inline_asm")] - asm!("push {}; popfq", in(reg) val, options(nomem, preserves_flags)); + unsafe { + asm!("push {}; popfq", in(reg) val, options(nomem, preserves_flags)); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_write_rflags(val); + unsafe { + crate::asm::x86_64_asm_write_rflags(val); + } } #[cfg(test)] diff --git a/src/registers/segmentation.rs b/src/registers/segmentation.rs new file mode 100644 index 000000000..be497d075 --- /dev/null +++ b/src/registers/segmentation.rs @@ -0,0 +1,155 @@ +//! Abstractions for segment registers. + +use super::model_specific::Msr; +use crate::{PrivilegeLevel, VirtAddr}; +use bit_field::BitField; +use core::fmt; +// imports for intra doc links +#[cfg(doc)] +use crate::registers::control::Cr4Flags; + +/// An x86 segment +/// +/// Segment registers on x86 are 16-bit [`SegmentSelector`]s, which index into +/// the [`GlobalDescriptorTable`](crate::structures::gdt::GlobalDescriptorTable). The +/// corresponding GDT entry is used to +/// configure the segment itself. Note that most segmentation functionality is +/// disabled in 64-bit mode. See the individual segments for more information. +pub trait Segment { + /// Returns the current value of the segment register. + fn get_reg() -> SegmentSelector; + /// Reload the segment register. Depending on the segment, this may also + /// reconfigure the corresponding segment. + /// + /// ## Safety + /// + /// This function is unsafe because the caller must ensure that `sel` + /// is a valid segment descriptor, and that reconfiguring the segment will + /// not cause undefined behavior. + unsafe fn set_reg(sel: SegmentSelector); +} + +/// An x86 segment which is actually used in 64-bit mode +/// +/// While most segments are unused in 64-bit mode, the FS and GS segments are +/// still partially used. Only the 64-bit segment base address is used, and this +/// address can be set via the GDT, or by using the `FSGSBASE` instructions. +pub trait Segment64: Segment { + /// MSR containing the segment base. This MSR can be used to set the base + /// when [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is **not** set. + const BASE: Msr; + /// Reads the segment base address + /// + /// ## Exceptions + /// + /// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`. + fn read_base() -> VirtAddr; + /// Writes the segment base address + /// + /// ## Exceptions + /// + /// If [`CR4.FSGSBASE`][Cr4Flags::FSGSBASE] is not set, this instruction will throw a `#UD`. + /// + /// ## Safety + /// + /// The caller must ensure that this write operation has no unsafe side + /// effects, as the segment base address might be in use. + unsafe fn write_base(base: VirtAddr); +} + +/// Specifies which element to load into a segment from +/// descriptor tables (i.e., is a index to LDT or GDT table +/// with some additional flags). +/// +/// See Intel 3a, Section 3.4.2 "Segment Selectors" +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(transparent)] +pub struct SegmentSelector(pub u16); + +impl SegmentSelector { + /// Creates a new SegmentSelector + /// + /// # Arguments + /// * `index`: index in GDT or LDT array (not the offset) + /// * `rpl`: the requested privilege level + #[inline] + pub const fn new(index: u16, rpl: PrivilegeLevel) -> SegmentSelector { + SegmentSelector(index << 3 | (rpl as u16)) + } + + /// Returns the GDT index. + #[inline] + pub fn index(self) -> u16 { + self.0 >> 3 + } + + /// Returns the requested privilege level. 
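The packing used by `SegmentSelector::new` above, spelled out: bits 3 and up hold the descriptor-table index, bit 2 chooses GDT (0) or LDT (1), and bits 0..2 carry the requested privilege level. A standalone sketch:

```rust
/// Mirror of the `index << 3 | rpl` packing in SegmentSelector::new.
fn selector_bits(index: u16, rpl: u16) -> u16 {
    (index << 3) | (rpl & 0b11)
}

fn main() {
    // GDT entry 1, ring 0: the classic 0x08 kernel code selector.
    assert_eq!(selector_bits(1, 0), 0x08);
    // GDT entry 3, ring 3: 0x1b, a typical user code selector.
    assert_eq!(selector_bits(3, 3), 0x1b);
}
```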
+ #[inline] + pub fn rpl(self) -> PrivilegeLevel { + PrivilegeLevel::from_u16(self.0.get_bits(0..2)) + } + + /// Set the privilege level for this Segment selector. + #[inline] + pub fn set_rpl(&mut self, rpl: PrivilegeLevel) { + self.0.set_bits(0..2, rpl as u16); + } +} + +impl fmt::Debug for SegmentSelector { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut s = f.debug_struct("SegmentSelector"); + s.field("index", &self.index()); + s.field("rpl", &self.rpl()); + s.finish() + } +} + +/// Code Segment +/// +/// The segment base and limit are unused in 64-bit mode. Only the L (long), D +/// (default operation size), and DPL (descriptor privilege-level) fields of the +/// descriptor are recognized. So changing the segment register can be used to +/// change privilege level or enable/disable long mode. +#[derive(Debug)] +pub struct CS; + +/// Stack Segment +/// +/// Entirely unused in 64-bit mode; setting the segment register does nothing. +/// However, in ring 3, the SS register still has to point to a valid +/// [`Descriptor`](crate::structures::gdt::Descriptor) (it cannot be zero). This +/// means a user-mode read/write segment descriptor must be present in the GDT. +/// +/// This register is also set by the `syscall`/`sysret` and +/// `sysenter`/`sysexit` instructions (even on 64-bit transitions). This is to +/// maintain symmetry with 32-bit transitions where setting SS actually will +/// actually have an effect. +#[derive(Debug)] +pub struct SS; + +/// Data Segment +/// +/// Entirely unused in 64-bit mode; setting the segment register does nothing. +#[derive(Debug)] +pub struct DS; + +/// ES Segment +/// +/// Entirely unused in 64-bit mode; setting the segment register does nothing. +#[derive(Debug)] +pub struct ES; + +/// FS Segment +/// +/// Only base is used in 64-bit mode, see [`Segment64`]. This is often used in +/// user-mode for Thread-Local Storage (TLS). +#[derive(Debug)] +pub struct FS; + +/// GS Segment +/// +/// Only base is used in 64-bit mode, see [`Segment64`]. In kernel-mode, the GS +/// base often points to a per-cpu kernel data structure. +#[derive(Debug)] +pub struct GS; diff --git a/src/registers/xcontrol.rs b/src/registers/xcontrol.rs index cc94858c9..3f85001c3 100644 --- a/src/registers/xcontrol.rs +++ b/src/registers/xcontrol.rs @@ -54,6 +54,9 @@ bitflags! { #[cfg(feature = "instructions")] mod x86_64 { use super::*; + #[cfg(feature = "inline_asm")] + use core::arch::asm; + impl XCr0 { /// Read the current set of XCR0 flags. #[inline] @@ -123,7 +126,9 @@ mod x86_64 { ); } - Self::write_raw(new_value); + unsafe { + Self::write_raw(new_value); + } } /// Write raw XCR0 flags. @@ -140,15 +145,19 @@ mod x86_64 { let high = (value >> 32) as u32; #[cfg(feature = "inline_asm")] - asm!( - "xsetbv", - in("ecx") 0, - in("rax") low, in("rdx") high, - options(nomem, nostack, preserves_flags), - ); + unsafe { + asm!( + "xsetbv", + in("ecx") 0, + in("rax") low, in("rdx") high, + options(nomem, nostack, preserves_flags), + ); + } #[cfg(not(feature = "inline_asm"))] - crate::asm::x86_64_asm_xsetbv(0, low, high); + unsafe { + crate::asm::x86_64_asm_xsetbv(0, low, high); + } } } } diff --git a/src/structures/gdt.rs b/src/structures/gdt.rs index 27bf88b02..fdf1ea546 100644 --- a/src/structures/gdt.rs +++ b/src/structures/gdt.rs @@ -1,58 +1,13 @@ //! Types for the Global Descriptor Table and segment selectors. 
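With `SegmentSelector` moving to `registers::segmentation` (re-exported just below), the GDT workflow reads as follows. A sketch for early, single-threaded boot, assuming the default `const_fn` feature so `GlobalDescriptorTable::new` is usable in a static:

```rust
use x86_64::instructions::segmentation::{Segment, CS, SS};
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable};

static mut GDT: GlobalDescriptorTable = GlobalDescriptorTable::new();

/// Build a minimal GDT, load it, then reload CS and SS as the updated
/// `load` documentation below requires.
unsafe fn init_gdt() {
    unsafe {
        let code = GDT.add_entry(Descriptor::kernel_code_segment());
        let data = GDT.add_entry(Descriptor::kernel_data_segment());
        GDT.load_unsafe();
        CS::set_reg(code);
        SS::set_reg(data);
    }
}
```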
+pub use crate::registers::segmentation::SegmentSelector; use crate::structures::tss::TaskStateSegment; use crate::PrivilegeLevel; use bit_field::BitField; use bitflags::bitflags; -use core::fmt; - -/// Specifies which element to load into a segment from -/// descriptor tables (i.e., is a index to LDT or GDT table -/// with some additional flags). -/// -/// See Intel 3a, Section 3.4.2 "Segment Selectors" -#[derive(Clone, Copy, PartialEq, Eq)] -#[repr(transparent)] -pub struct SegmentSelector(pub u16); - -impl SegmentSelector { - /// Creates a new SegmentSelector - /// - /// # Arguments - /// * `index`: index in GDT or LDT array (not the offset) - /// * `rpl`: the requested privilege level - #[inline] - pub const fn new(index: u16, rpl: PrivilegeLevel) -> SegmentSelector { - SegmentSelector(index << 3 | (rpl as u16)) - } - - /// Returns the GDT index. - #[inline] - pub fn index(self) -> u16 { - self.0 >> 3 - } - - /// Returns the requested privilege level. - #[inline] - pub fn rpl(self) -> PrivilegeLevel { - PrivilegeLevel::from_u16(self.0.get_bits(0..2)) - } - - /// Set the privilege level for this Segment selector. - #[inline] - pub fn set_rpl(&mut self, rpl: PrivilegeLevel) { - self.0.set_bits(0..2, rpl as u16); - } -} - -impl fmt::Debug for SegmentSelector { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut s = f.debug_struct("SegmentSelector"); - s.field("index", &self.index()); - s.field("rpl", &self.rpl()); - s.finish() - } -} +// imports for intra-doc links +#[cfg(doc)] +use crate::registers::segmentation::{Segment, CS, SS}; /// A 64-bit mode global descriptor table (GDT). /// @@ -117,7 +72,7 @@ impl GlobalDescriptorTable { let mut table = [0; 8]; let mut idx = 0; - const_assert!( + assert!( next_free <= 8, "initializing a GDT from a slice requires it to be **at most** 8 elements." ); @@ -173,8 +128,7 @@ impl GlobalDescriptorTable { /// Loads the GDT in the CPU using the `lgdt` instruction. This does **not** alter any of the /// segment registers; you **must** (re)load them yourself using [the appropriate /// functions](crate::instructions::segmentation): - /// [load_ss](crate::instructions::segmentation::load_ss), - /// [set_cs](crate::instructions::segmentation::set_cs). + /// [`SS::set_reg()`] and [`CS::set_reg()`]. #[cfg(feature = "instructions")] #[inline] pub fn load(&'static mut self) { @@ -185,8 +139,7 @@ impl GlobalDescriptorTable { /// Loads the GDT in the CPU using the `lgdt` instruction. This does **not** alter any of the /// segment registers; you **must** (re)load them yourself using [the appropriate /// functions](crate::instructions::segmentation): - /// [load_ss](crate::instructions::segmentation::load_ss), - /// [set_cs](crate::instructions::segmentation::set_cs). + /// [`SS::set_reg()`] and [`CS::set_reg()`]. /// /// # Safety /// @@ -198,7 +151,9 @@ impl GlobalDescriptorTable { #[inline] pub unsafe fn load_unsafe(&mut self) { use crate::instructions::tables::lgdt; - lgdt(&self.pointer()); + unsafe { + lgdt(&self.pointer()); + } } const_fn! { diff --git a/src/structures/idt.rs b/src/structures/idt.rs index e82366040..be82359d3 100644 --- a/src/structures/idt.rs +++ b/src/structures/idt.rs @@ -346,8 +346,37 @@ pub struct InterruptDescriptorTable { /// vector nr. 20 pub virtualization: Entry, - /// vector nr. 21-29 - reserved_2: [Entry; 9], + /// vector nr. 21-28 + reserved_2: [Entry; 8], + + /// The VMM Communication Exception (`#VC`) is always generated by hardware when an `SEV-ES` + /// enabled guest is running and an `NAE` event occurs. 
+    ///
+    /// `SEV-ES` stands for the _"Encrypted State"_ feature of the _"AMD Secure Encrypted Virtualization"_
+    /// technology. `NAE` stands for a _"Non-Automatic Exit"_, which is a `VMEXIT` event that requires
+    /// hypervisor emulation. See
+    /// [this whitepaper](https://www.amd.com/system/files/TechDocs/Protecting%20VM%20Register%20State%20with%20SEV-ES.pdf)
+    /// for an overview of the `SEV-ES` feature.
+    ///
+    /// The `#VC` exception is a precise, contributory, fault-type exception utilizing exception vector 29.
+    /// This exception cannot be masked. The error code of the `#VC` exception is equal
+    /// to the `#VMEXIT` code of the event that caused the `NAE`.
+    ///
+    /// In response to a `#VC` exception, a typical flow would involve the guest handler inspecting the error
+    /// code to determine the cause of the exception and deciding what register state must be copied to the
+    /// `GHCB` (_"Guest Hypervisor Communication Block"_) for the event to be handled. The handler
+    /// should then execute the `VMGEXIT` instruction to
+    /// create an `AE` and invoke the hypervisor. After a later `VMRUN`, guest execution will resume after the
+    /// `VMGEXIT` instruction, where the handler can view the results from the hypervisor and copy state from
+    /// the `GHCB` back to its internal state as needed.
+    ///
+    /// Note that it is inadvisable for the hypervisor to set the `VMCB` (_"Virtual Machine Control Block"_)
+    /// intercept bit for the `#VC` exception, as
+    /// this would prevent proper handling of `NAE`s by the guest. Similarly, the hypervisor should avoid
+    /// setting intercept bits for events that would occur in the `#VC` handler (such as `IRET`).
+    ///
+    /// The vector number of the `#VC` exception is 29.
+    pub vmm_communication_exception: Entry<HandlerFuncWithErrCode>,
 
     /// The Security Exception (`#SX`) signals security-sensitive events that occur while
     /// executing the VMM, in the form of an exception so that the VMM may take appropriate
@@ -413,7 +442,8 @@ impl InterruptDescriptorTable {
             machine_check: Entry::missing(),
             simd_floating_point: Entry::missing(),
             virtualization: Entry::missing(),
-            reserved_2: [Entry::missing(); 9],
+            reserved_2: [Entry::missing(); 8],
+            vmm_communication_exception: Entry::missing(),
             security_exception: Entry::missing(),
             reserved_3: Entry::missing(),
             interrupts: [Entry::missing(); 256 - 32],
@@ -448,7 +478,9 @@ impl InterruptDescriptorTable {
     #[inline]
     pub unsafe fn load_unsafe(&self) {
         use crate::instructions::tables::lidt;
-        lidt(&self.pointer());
+        unsafe {
+            lidt(&self.pointer());
+        }
     }
 
     /// Creates the descriptor pointer for this table.
This pointer can only be @@ -524,8 +556,8 @@ impl Index for InterruptDescriptorTable { 19 => &self.simd_floating_point, 20 => &self.virtualization, i @ 32..=255 => &self.interrupts[usize::from(i - 32)], - i @ 15 | i @ 31 | i @ 21..=29 => panic!("entry {} is reserved", i), - i @ 8 | i @ 10..=14 | i @ 17 | i @ 30 => { + i @ 15 | i @ 31 | i @ 21..=28 => panic!("entry {} is reserved", i), + i @ 8 | i @ 10..=14 | i @ 17 | i @ 29 | i @ 30 => { panic!("entry {} is an exception with error code", i) } i @ 18 => panic!("entry {} is an diverging exception (must not return)", i), @@ -553,8 +585,8 @@ impl IndexMut for InterruptDescriptorTable { 19 => &mut self.simd_floating_point, 20 => &mut self.virtualization, i @ 32..=255 => &mut self.interrupts[usize::from(i - 32)], - i @ 15 | i @ 31 | i @ 21..=29 => panic!("entry {} is reserved", i), - i @ 8 | i @ 10..=14 | i @ 17 | i @ 30 => { + i @ 15 | i @ 31 | i @ 21..=28 => panic!("entry {} is reserved", i), + i @ 8 | i @ 10..=14 | i @ 17 | i @ 29 | i @ 30 => { panic!("entry {} is an exception with error code", i) } i @ 18 => panic!("entry {} is an diverging exception (must not return)", i), @@ -689,6 +721,9 @@ pub type DivergingHandlerFuncWithErrCode = #[derive(Copy, Clone, Debug)] pub struct DivergingHandlerFuncWithErrCode(()); +/// A general handler function for an interrupt or an exception with the interrupt/exceptions's index and an optional error code. +pub type GeneralHandlerFunc = fn(InterruptStackFrame, index: u8, error_code: Option); + impl Entry { /// Creates a non-present IDT entry (but sets the must-be-one bits). #[inline] @@ -727,7 +762,7 @@ impl Entry { self.options = EntryOptions::minimal(); // SAFETY: The current CS is a valid, long-mode code segment. - self.options.set_code_selector(CS::get_reg()); + unsafe { self.options.set_code_selector(CS::get_reg()) }; self.options.set_present(true); &mut self.options } @@ -841,8 +876,6 @@ impl EntryOptions { /// Set the required privilege level (DPL) for invoking the handler. The DPL can be 0, 1, 2, /// or 3, the default is 0. If CPL < DPL, a general protection fault occurs. - /// - /// This function panics for a DPL > 3. #[inline] pub fn set_privilege_level(&mut self, dpl: PrivilegeLevel) -> &mut Self { self.bits.set_bits(13..15, dpl as u16); @@ -863,6 +896,7 @@ impl EntryOptions { /// This function panics if the index is not in the range 0..7. /// /// ## Safety + /// /// This function is unsafe because the caller must ensure that the passed stack index is /// valid and not used by other interrupts. Otherwise, memory safety violations are possible. #[inline] @@ -926,7 +960,7 @@ impl fmt::Debug for InterruptStackFrame { } /// Represents the interrupt stack frame pushed by the CPU on interrupt or exception entry. -#[derive(Clone)] +#[derive(Clone, Copy)] #[repr(C)] pub struct InterruptStackFrameValue { /// This value points to the instruction that should be executed when the interrupt @@ -961,6 +995,10 @@ impl fmt::Debug for InterruptStackFrameValue { bitflags! { /// Describes an page fault error code. + /// + /// This structure is defined by the following manual sections: + /// * AMD Volume 2: 8.4.2 + /// * Intel Volume 3A: 4.7 #[repr(transparent)] pub struct PageFaultErrorCode: u64 { /// If this flag is set, the page fault was caused by a page-protection violation, @@ -984,6 +1022,21 @@ bitflags! { /// If this flag is set, it indicates that the access that caused the page fault was an /// instruction fetch. 
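For context on `set_stack_index` above: its main consumer is the double-fault handler, which must run on a known-good interrupt stack. A sketch, assuming IST slot 0 was already configured in the TSS and the `abi_x86_interrupt` nightly feature is enabled:

```rust
#![feature(abi_x86_interrupt)] // nightly, matching the crate feature

use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};

const DOUBLE_FAULT_IST_INDEX: u16 = 0; // assumed TSS IST slot

extern "x86-interrupt" fn double_fault(frame: InterruptStackFrame, _code: u64) -> ! {
    panic!("double fault: {:?}", frame);
}

/// Route the double-fault handler onto a dedicated stack via IST.
fn init_idt(idt: &'static mut InterruptDescriptorTable) {
    unsafe {
        // set_stack_index is unsafe: the slot must be valid and unshared.
        idt.double_fault
            .set_handler_fn(double_fault)
            .set_stack_index(DOUBLE_FAULT_IST_INDEX);
    }
    idt.load();
}
```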
         const INSTRUCTION_FETCH = 1 << 4;
+
+        /// If this flag is set, it indicates that the page fault was caused by a protection key.
+        const PROTECTION_KEY = 1 << 5;
+
+        /// If this flag is set, it indicates that the page fault was caused by a shadow stack
+        /// access.
+        const SHADOW_STACK = 1 << 6;
+
+        /// If this flag is set, it indicates that the page fault was caused by SGX access-control
+        /// requirements (Intel-only).
+        const SGX = 1 << 15;
+
+        /// If this flag is set, it indicates that the page fault is a result of the processor
+        /// encountering an RMP violation (AMD-only).
+        const RMP = 1 << 31;
     }
 }

@@ -1062,10 +1115,313 @@ pub enum DescriptorTable {
     Ldt,
 }

+/// This structure defines the CPU-internal exception vector numbers.
+///
+/// The values are defined by the following manual sections:
+/// * AMD Volume 2: 8.2
+/// * Intel Volume 3A: 6.3.1
+#[repr(u8)]
+#[non_exhaustive]
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum ExceptionVector {
+    /// Error during Division
+    Division = 0x00,
+
+    /// Debug
+    Debug = 0x01,
+
+    /// Non-Maskable Interrupt
+    NonMaskableInterrupt = 0x02,
+
+    /// Breakpoint
+    Breakpoint = 0x03,
+
+    /// Overflow
+    Overflow = 0x04,
+
+    /// Bound Range Exceeded
+    BoundRange = 0x05,
+
+    /// Invalid Opcode
+    InvalidOpcode = 0x06,
+
+    /// Device Not Available
+    DeviceNotAvailable = 0x07,
+
+    /// Double Fault
+    Double = 0x08,
+
+    /// Invalid TSS
+    InvalidTss = 0x0A,
+
+    /// Segment Not Present
+    SegmentNotPresent = 0x0B,
+
+    /// Stack Fault
+    Stack = 0x0C,
+
+    /// General Protection Fault
+    GeneralProtection = 0x0D,
+
+    /// Page Fault
+    Page = 0x0E,
+
+    /// x87 Floating-Point Exception
+    X87FloatingPoint = 0x10,
+
+    /// Alignment Check
+    AlignmentCheck = 0x11,
+
+    /// Machine Check
+    MachineCheck = 0x12,
+
+    /// SIMD Floating-Point Exception
+    SimdFloatingPoint = 0x13,
+
+    /// Virtualization Exception (Intel-only)
+    Virtualization = 0x14,
+
+    /// Control Protection Exception
+    ControlProtection = 0x15,
+
+    /// Hypervisor Injection (AMD-only)
+    HypervisorInjection = 0x1C,
+
+    /// VMM Communication (AMD-only)
+    VmmCommunication = 0x1D,
+
+    /// Security Exception
+    Security = 0x1E,
+}
+
+#[cfg(all(feature = "instructions", feature = "abi_x86_interrupt"))]
+#[macro_export]
+/// Set a general handler in an [`InterruptDescriptorTable`].
+/// ```
+/// #![feature(abi_x86_interrupt)]
+/// use x86_64::set_general_handler;
+/// use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame};
+///
+/// let mut idt = InterruptDescriptorTable::new();
+/// fn my_general_handler(
+///     stack_frame: InterruptStackFrame,
+///     index: u8,
+///     error_code: Option<u64>,
+/// ) {
+///     todo!("handle irq {}", index)
+/// }
+///
+/// // set only one entry
+/// # // there seems to be a bug in LLVM that causes rustc to crash on windows when compiling this test:
+/// # // https://github.com/rust-osdev/x86_64/pull/285#issuecomment-962642984
+/// # #[cfg(not(windows))]
+/// set_general_handler!(&mut idt, my_general_handler, 14);
+///
+/// // set a range of entries
+/// # // there seems to be a bug in LLVM that causes rustc to crash on windows when compiling this test:
+/// # // https://github.com/rust-osdev/x86_64/pull/285#issuecomment-962642984
+/// # #[cfg(not(windows))]
+/// set_general_handler!(&mut idt, my_general_handler, 32..64);
+///
+/// // set all entries
+/// # // there seems to be a bug in LLVM that causes rustc to crash on windows when compiling this test:
+/// # // https://github.com/rust-osdev/x86_64/pull/285#issuecomment-962642984
+/// # #[cfg(not(windows))]
+/// set_general_handler!(&mut idt, my_general_handler);
+/// ```
+macro_rules! set_general_handler {
+    ($idt:expr, $handler:ident) => {
+        $crate::set_general_handler!($idt, $handler, 0..=255);
+    };
+    ($idt:expr, $handler:ident, $idx:literal) => {
+        $crate::set_general_handler!($idt, $handler, $idx..=$idx);
+    };
+    ($idt:expr, $handler:ident, $range:expr) => {{
+        /// This constant is used to avoid spamming the same compilation error ~200 times
+        /// when the handler's signature is wrong.
+        /// If we just passed `$handler` to `set_general_handler_recursive_bits`
+        /// an error would be reported for every interrupt handler that tried to call it.
+        /// With `GENERAL_HANDLER` the error is only reported once for this constant.
+        const GENERAL_HANDLER: $crate::structures::idt::GeneralHandlerFunc = $handler;
+
+        {
+            fn set_general_handler(
+                idt: &mut $crate::structures::idt::InterruptDescriptorTable,
+                range: impl ::core::ops::RangeBounds<u8>,
+            ) {
+                $crate::set_general_handler_recursive_bits!(idt, GENERAL_HANDLER, range);
+            }
+            set_general_handler($idt, $range);
+        }
+    }};
+}
+
+#[cfg(all(feature = "instructions", feature = "abi_x86_interrupt"))]
+#[macro_export]
+#[doc(hidden)]
+/// We can't loop in macros, but we can use recursion.
+/// This macro recursively adds one more bit to its arguments until we have 8 bits so that we can call `set_general_handler_entry`.
macro_rules! set_general_handler_recursive_bits {
+    // if we have all 8 bits, construct the index from the bits, check if the entry is in range and invoke the macro that sets the handler
+    ($idt:expr, $handler:ident, $range:expr, $bit7:tt, $bit6:tt, $bit5:tt, $bit4:tt, $bit3:tt, $bit2:tt, $bit1:tt, $bit0:tt) => {{
+        const IDX: u8 = $bit0 | ($bit1 << 1) | ($bit2 << 2) | ($bit3 << 3) | ($bit4 << 4) | ($bit5 << 5) | ($bit6 << 6) | ($bit7 << 7);
+
+        #[allow(unreachable_code)]
+        if $range.contains(&IDX) {
+            $crate::set_general_handler_entry!($idt, $handler, IDX, $bit7, $bit6, $bit5, $bit4, $bit3, $bit2, $bit1, $bit0);
+        }
+    }};
+    // otherwise recursively invoke the macro adding one more bit
+    ($idt:expr, $handler:ident, $range:expr $(, $bits:tt)*) => {
+        $crate::set_general_handler_recursive_bits!($idt, $handler, $range $(, $bits)*, 0);
+        $crate::set_general_handler_recursive_bits!($idt, $handler, $range $(, $bits)*, 1);
+    };
+}
+
+#[cfg(all(feature = "instructions", feature = "abi_x86_interrupt"))]
+#[macro_export]
+#[doc(hidden)]
+macro_rules! set_general_handler_entry {
+    // special case entries that don't have the `HandlerFunc` signature
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 0, 0, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) -> ! {
+            $handler(frame, $idx.into(), Some(error_code));
+            panic!("General handler returned on double fault");
+        }
+        $idt.double_fault.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 0, 1, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.invalid_tss.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 0, 1, 1) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.segment_not_present.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 1, 0, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.stack_segment_fault.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 1, 0, 1) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.general_protection_fault.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 1, 1, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: $crate::structures::idt::PageFaultErrorCode,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code.bits()));
+        }
+        $idt.page_fault.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 0, 0, 0, 1) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.alignment_check.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 0, 0, 1, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+        ) -> ! {
+            $handler(frame, $idx.into(), None);
+            panic!("General handler returned on machine check exception");
+        }
+        $idt.machine_check.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 1, 0, 1) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.vmm_communication_exception.set_handler_fn(handler);
+    }};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 1, 1, 0) => {{
+        extern "x86-interrupt" fn handler(
+            frame: $crate::structures::idt::InterruptStackFrame,
+            error_code: u64,
+        ) {
+            $handler(frame, $idx.into(), Some(error_code));
+        }
+        $idt.security_exception.set_handler_fn(handler);
+    }};
+
+    // reserved_1
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 0, 1, 1, 1, 1) => {};
+    // reserved_2
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 0, 1, 0, 1) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 0, 1, 1, 0) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 0, 1, 1, 1) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 0, 0, 0) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 0, 0, 1) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 0, 1, 0) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 0, 1, 1) => {};
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 1, 0, 0) => {};
+    // reserved_3
+    ($idt:expr, $handler:ident, $idx:ident, 0, 0, 0, 1, 1, 1, 1, 1) => {};
+
+    // set entries with `HandlerFunc` signature
+    ($idt:expr, $handler:ident, $idx:ident $(, $_bits:tt)*) => {{
+        extern "x86-interrupt" fn handler(frame: $crate::structures::idt::InterruptStackFrame) {
+            $handler(frame, $idx.into(), None);
+        }
+        $idt[$idx].set_handler_fn(handler);
+    }};
+}
+
 #[cfg(test)]
 mod test {
     use super::*;

+    fn entry_present(idt: &InterruptDescriptorTable, index: u8) -> bool {
+        let options = match index {
+            8 => &idt.double_fault.options,
+            10 => &idt.invalid_tss.options,
+            11 => &idt.segment_not_present.options,
+            12 => &idt.stack_segment_fault.options,
+            13 => &idt.general_protection_fault.options,
+            14 => &idt.page_fault.options,
+            15 => &idt.reserved_1.options,
+            17 => &idt.alignment_check.options,
+            18 => &idt.machine_check.options,
+            i @ 21..=28 => &idt.reserved_2[usize::from(i) - 21].options,
+            29 => &idt.vmm_communication_exception.options,
+            30 => &idt.security_exception.options,
+            31 => &idt.reserved_3.options,
+            other => &idt[other].options,
+        };
+        options.bits.get_bit(15)
+    }
+
     #[test]
     fn size_test() {
         use core::mem::size_of;
@@ -1075,6 +1431,55 @@ mod test {
         assert_eq!(size_of::<InterruptStackFrameValue>(), 40);
     }

+    #[cfg(all(feature = "instructions", feature = "abi_x86_interrupt"))]
+    // there seems to be a bug in LLVM that causes rustc to crash on windows when compiling this test:
+    // https://github.com/rust-osdev/x86_64/pull/285#issuecomment-962642984
+    #[cfg(not(windows))]
+    #[test]
+    fn default_handlers() {
+        fn general_handler(
+            _stack_frame: InterruptStackFrame,
+            _index: u8,
+            _error_code: Option<u64>,
+        ) {
+        }
+
+        let mut idt = InterruptDescriptorTable::new();
+        set_general_handler!(&mut idt, general_handler, 0);
+        for i in 0..=255 {
+            if i == 0 {
+                assert!(entry_present(&idt, i));
+            } else {
+                assert!(!entry_present(&idt, i));
+            }
+        }
+        set_general_handler!(&mut idt, general_handler, 14);
+        for i in 0..=255 {
+            if i == 0 || i == 14 {
+                assert!(entry_present(&idt, i));
+            } else {
+                assert!(!entry_present(&idt, i));
+            }
+        }
+        set_general_handler!(&mut idt, general_handler, 32..64);
+        for i in 0..=255 {
+            if i == 0 || i == 14 || (32..64).contains(&i) {
+                assert!(entry_present(&idt, i), "{}", i);
+            } else {
+                assert!(!entry_present(&idt, i));
+            }
+        }
+        set_general_handler!(&mut idt, general_handler);
+        for i in 0..=255 {
+            if i == 15 || i == 31 || (21..=28).contains(&i) {
+                // reserved entries should not be set
+                assert!(!entry_present(&idt, i));
+            } else {
+                assert!(entry_present(&idt, i));
+            }
+        }
+    }
+
     #[test]
     fn entry_derive_test() {
         fn foo(_: impl Clone + Copy + PartialEq + fmt::Debug) {}
@@ -1088,4 +1493,23 @@ mod test {
             phantom: PhantomData,
         })
     }
+
+    #[test]
+    fn isr_frame_manipulation() {
+        let mut frame = InterruptStackFrame(InterruptStackFrameValue {
+            instruction_pointer: VirtAddr::new(0x1000),
+            code_segment: SegmentSelector(0),
+            cpu_flags: RFlags::empty(),
+            stack_pointer: VirtAddr::new(0x2000),
+            stack_segment: SegmentSelector(0),
+            _reserved1: Default::default(),
+            _reserved2: Default::default(),
+        });
+
+        unsafe {
+            frame
+                .as_mut()
+                .update(|f| f.instruction_pointer = f.instruction_pointer + 2u64);
+        }
+    }
 }
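As a quick illustration of the new `ExceptionVector` enum added to `idt.rs` above (an editorial sketch, not part of this change set): the enum is `#[non_exhaustive]` and, as far as this release goes, appears to ship no `TryFrom<u8>` conversion, so callers holding a raw vector number can decode it with an ordinary `match`. The `decode_vector` helper below is hypothetical.

```rust
use x86_64::structures::idt::ExceptionVector;

// Hypothetical helper: map a raw vector number to the enum.
// Only a few vectors are shown; unknown or reserved vectors yield `None`.
fn decode_vector(vector: u8) -> Option<ExceptionVector> {
    Some(match vector {
        0x00 => ExceptionVector::Division,
        0x0D => ExceptionVector::GeneralProtection,
        0x0E => ExceptionVector::Page,
        0x1D => ExceptionVector::VmmCommunication,
        _ => return None,
    })
}

fn main() {
    assert_eq!(decode_vector(0x0E), Some(ExceptionVector::Page));
    assert_eq!(decode_vector(0x09), None); // vector 9 is reserved on modern CPUs
}
```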
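The new `PageFaultErrorCode` bits (`PROTECTION_KEY`, `SHADOW_STACK`, `SGX`, `RMP`) compose with the existing flags like any other `bitflags` type. A minimal host-side sketch, using only the flag names from the diff; `describe_fault` is an illustrative helper, not a crate API:

```rust
use x86_64::structures::idt::PageFaultErrorCode;

// Illustrative helper (not part of the crate): summarize a fault's cause,
// checking the most specific causes first.
fn describe_fault(code: PageFaultErrorCode) -> &'static str {
    if code.contains(PageFaultErrorCode::RMP) {
        "AMD SEV-SNP reverse-map table violation"
    } else if code.contains(PageFaultErrorCode::SGX) {
        "Intel SGX access-control violation"
    } else if code.contains(PageFaultErrorCode::SHADOW_STACK) {
        "CET shadow-stack access fault"
    } else if code.contains(PageFaultErrorCode::PROTECTION_KEY) {
        "memory-protection-key (PKU) violation"
    } else if code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) {
        "page-protection violation"
    } else {
        "page not present"
    }
}

fn main() {
    let code = PageFaultErrorCode::PROTECTION_VIOLATION | PageFaultErrorCode::CAUSED_BY_WRITE;
    assert_eq!(describe_fault(code), "page-protection violation");
}
```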
diff --git a/src/structures/paging/frame.rs b/src/structures/paging/frame.rs
index f125404fc..d64eb6b4f 100644
--- a/src/structures/paging/frame.rs
+++ b/src/structures/paging/frame.rs
@@ -8,7 +8,7 @@ use core::marker::PhantomData;
 use core::ops::{Add, AddAssign, Sub, SubAssign};

 /// A physical memory frame.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[repr(C)]
 pub struct PhysFrame<S: PageSize = Size4KiB> {
     pub(crate) start_address: PhysAddr, // TODO: remove when start_address() is const
@@ -24,7 +24,9 @@ impl<S: PageSize> PhysFrame<S> {
         if !address.is_aligned(S::SIZE) {
             return Err(AddressNotAligned);
         }
-        Ok(PhysFrame::containing_address(address))
+
+        // SAFETY: correct address alignment is checked above
+        Ok(unsafe { PhysFrame::from_start_address_unchecked(address) })
     }

     const_fn! {
@@ -133,7 +135,7 @@ impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
 }

 /// A range of physical memory frames, exclusive of the upper bound.
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
 #[repr(C)]
 pub struct PhysFrameRange<S: PageSize = Size4KiB> {
     /// The start of the range, inclusive.
@@ -175,7 +177,7 @@ impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
 }

 /// A range of physical memory frames, inclusive of the upper bound.
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
 #[repr(C)]
 pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
     /// The start of the range, inclusive.
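The `frame.rs` change above only swaps the body of the checked constructor to `from_start_address_unchecked` after the explicit alignment check, so the observable behavior of `PhysFrame::from_start_address` is unchanged. A small sketch of that contract:

```rust
use x86_64::PhysAddr;
use x86_64::structures::paging::{PhysFrame, Size4KiB};

fn main() {
    // An aligned address yields a frame starting exactly there.
    let frame: PhysFrame<Size4KiB> =
        PhysFrame::from_start_address(PhysAddr::new(0x5000)).unwrap();
    assert_eq!(frame.start_address(), PhysAddr::new(0x5000));

    // An unaligned address is rejected rather than silently rounded down;
    // `containing_address` is the rounding variant.
    assert!(PhysFrame::<Size4KiB>::from_start_address(PhysAddr::new(0x5001)).is_err());
    let containing = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x5001));
    assert_eq!(containing.start_address(), PhysAddr::new(0x5000));
}
```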
diff --git a/src/structures/paging/mapper/mapped_page_table.rs b/src/structures/paging/mapper/mapped_page_table.rs
index 83878d5b8..5de2dd63b 100644
--- a/src/structures/paging/mapper/mapped_page_table.rs
+++ b/src/structures/paging/mapper/mapped_page_table.rs
@@ -1,9 +1,9 @@
 use crate::structures::paging::{
     frame::PhysFrame,
-    frame_alloc::FrameAllocator,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
     mapper::*,
-    page::{AddressNotAligned, Page, Size1GiB, Size2MiB, Size4KiB},
-    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
+    page::{AddressNotAligned, Page, PageRangeInclusive, Size1GiB, Size2MiB, Size4KiB},
+    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags, PageTableLevel},
 };

 /// A Mapper implementation that relies on a PhysAddr to VirtAddr conversion function.
@@ -33,7 +33,7 @@ impl<'a, P: PageTableFrameMapping> MappedPageTable<'a, P> {
     pub unsafe fn new(level_4_table: &'a mut PageTable, page_table_frame_mapping: P) -> Self {
         Self {
             level_4_table,
-            page_table_walker: PageTableWalker::new(page_table_frame_mapping),
+            page_table_walker: unsafe { PageTableWalker::new(page_table_frame_mapping) },
         }
     }
@@ -589,6 +589,96 @@ impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
     }
 }

+impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe {
+            self.clean_up_addr_range(
+                PageRangeInclusive {
+                    start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
+                    end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
+                },
+                frame_deallocator,
+            )
+        }
+    }
+
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe fn clean_up<P: PageTableFrameMapping>(
+            page_table: &mut PageTable,
+            page_table_walker: &PageTableWalker<P>,
+            level: PageTableLevel,
+            range: PageRangeInclusive,
+            frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
+        ) -> bool {
+            if range.is_empty() {
+                return false;
+            }
+
+            let table_addr = range
+                .start
+                .start_address()
+                .align_down(level.table_address_space_alignment());
+
+            let start = range.start.page_table_index(level);
+            let end = range.end.page_table_index(level);
+
+            if let Some(next_level) = level.next_lower_level() {
+                let offset_per_entry = level.entry_address_space_alignment();
+                for (i, entry) in page_table
+                    .iter_mut()
+                    .enumerate()
+                    .take(usize::from(end) + 1)
+                    .skip(usize::from(start))
+                {
+                    if let Ok(page_table) = page_table_walker.next_table_mut(entry) {
+                        let start = table_addr + (offset_per_entry * (i as u64));
+                        let end = start + (offset_per_entry - 1);
+                        let start = Page::<Size4KiB>::containing_address(start);
+                        let start = start.max(range.start);
+                        let end = Page::<Size4KiB>::containing_address(end);
+                        let end = end.min(range.end);
+                        unsafe {
+                            if clean_up(
+                                page_table,
+                                page_table_walker,
+                                next_level,
+                                Page::range_inclusive(start, end),
+                                frame_deallocator,
+                            ) {
+                                let frame = entry.frame().unwrap();
+                                entry.set_unused();
+                                frame_deallocator.deallocate_frame(frame);
+                            }
+                        }
+                    }
+                }
+            }
+
+            page_table.iter().all(PageTableEntry::is_unused)
+        }
+
+        unsafe {
+            clean_up(
+                self.level_4_table,
+                &self.page_table_walker,
+                PageTableLevel::Four,
+                range,
+                frame_deallocator,
+            );
+        }
+    }
+}
+
 #[derive(Debug)]
 struct PageTableWalker<P: PageTableFrameMapping> {
     page_table_frame_mapping: P,
diff --git a/src/structures/paging/mapper/mod.rs b/src/structures/paging/mapper/mod.rs
index 1d83293d7..c044650b0 100644
--- a/src/structures/paging/mapper/mod.rs
+++ b/src/structures/paging/mapper/mod.rs
@@ -7,8 +7,10 @@ pub use self::offset_page_table::OffsetPageTable;
 pub use self::recursive_page_table::{InvalidPageTable, RecursivePageTable};

 use crate::structures::paging::{
-    frame_alloc::FrameAllocator, page_table::PageTableFlags, Page, PageSize, PhysFrame, Size1GiB,
-    Size2MiB, Size4KiB,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
+    page::PageRangeInclusive,
+    page_table::PageTableFlags,
+    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::{PhysAddr, VirtAddr};

@@ -190,7 +192,9 @@ pub trait Mapper<S: PageSize> {
                 | PageTableFlags::WRITABLE
                 | PageTableFlags::USER_ACCESSIBLE);

-        self.map_to_with_table_flags(page, frame, flags, parent_table_flags, frame_allocator)
+        unsafe {
+            self.map_to_with_table_flags(page, frame, flags, parent_table_flags, frame_allocator)
+        }
     }

     /// Creates a new mapping in the page table.
@@ -366,7 +370,7 @@ pub trait Mapper<S: PageSize> {
         Self: Mapper<S>,
     {
         let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
-        self.map_to(page, frame, flags, frame_allocator)
+        unsafe { self.map_to(page, frame, flags, frame_allocator) }
     }
 }

@@ -381,8 +385,11 @@ pub struct MapperFlush<S: PageSize>(Page<S>);

 impl<S: PageSize> MapperFlush<S> {
     /// Create a new flush promise
+    ///
+    /// Note that this method is intended for implementing the [`Mapper`] trait and no other uses
+    /// are expected.
     #[inline]
-    fn new(page: Page<S>) -> Self {
+    pub fn new(page: Page<S>) -> Self {
         MapperFlush(page)
     }

@@ -403,14 +410,17 @@ impl<S: PageSize> MapperFlush<S> {
 /// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
 /// to be flushed from the TLB before it's accessed. This type is returned from a function that
 /// made the change to ensure that the TLB flush is not forgotten.
-#[derive(Debug)]
+#[derive(Debug, Default)]
 #[must_use = "Page Table changes must be flushed or ignored."]
 pub struct MapperFlushAll(());

 impl MapperFlushAll {
     /// Create a new flush promise
+    ///
+    /// Note that this method is intended for implementing the [`Mapper`] trait and no other uses
+    /// are expected.
     #[inline]
-    fn new() -> Self {
+    pub fn new() -> Self {
         MapperFlushAll(())
     }

@@ -474,3 +484,45 @@ pub enum TranslateError {
 }

 static _ASSERT_OBJECT_SAFE: Option<&(dyn Translate + Sync)> = None;
+
+/// Provides methods for cleaning up unused entries.
+pub trait CleanUp {
+    /// Remove all empty P1-P3 tables
+    ///
+    /// ## Safety
+    ///
+    /// The caller has to guarantee that it's safe to free page table frames:
+    /// All page table frames must only be used once and only in this page table
+    /// (e.g. no reference-counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>;
+
+    /// Remove all empty P1-P3 tables in a certain range
+    /// ```
+    /// # use core::ops::RangeInclusive;
+    /// # use x86_64::{VirtAddr, structures::paging::{
+    /// #    FrameDeallocator, Size4KiB, MappedPageTable, mapper::{RecursivePageTable, CleanUp}, page::{Page, PageRangeInclusive},
+    /// # }};
+    /// # unsafe fn test(page_table: &mut RecursivePageTable, frame_deallocator: &mut impl FrameDeallocator<Size4KiB>) {
+    /// // clean up all page tables in the lower half of the address space
+    /// let lower_half = Page::range_inclusive(
+    ///     Page::containing_address(VirtAddr::new(0)),
+    ///     Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff)),
+    /// );
+    /// page_table.clean_up_addr_range(lower_half, frame_deallocator);
+    /// # }
+    /// ```
+    ///
+    /// ## Safety
+    ///
+    /// The caller has to guarantee that it's safe to free page table frames:
+    /// All page table frames must only be used once and only in this page table
+    /// (e.g. no reference-counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>;
+}
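Both methods of the new `CleanUp` trait hand freed page-table frames to a caller-supplied `FrameDeallocator`. A minimal sketch of such a deallocator, assuming only the trait bound used above; `CountingDeallocator` is hypothetical, and a real kernel would return the frame to its physical-memory allocator instead of counting:

```rust
use x86_64::structures::paging::{FrameDeallocator, PhysFrame, Size4KiB};

/// Hypothetical deallocator that only counts the page-table frames
/// released by `clean_up` / `clean_up_addr_range`.
struct CountingDeallocator {
    freed: usize,
}

impl FrameDeallocator<Size4KiB> for CountingDeallocator {
    unsafe fn deallocate_frame(&mut self, _frame: PhysFrame<Size4KiB>) {
        // A real implementation would hand the frame back to the
        // physical-memory allocator; here we just record the release.
        self.freed += 1;
    }
}
```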
diff --git a/src/structures/paging/mapper/offset_page_table.rs b/src/structures/paging/mapper/offset_page_table.rs
index a0b1d5eab..f735500ea 100644
--- a/src/structures/paging/mapper/offset_page_table.rs
+++ b/src/structures/paging/mapper/offset_page_table.rs
@@ -1,7 +1,8 @@
 #![cfg(target_pointer_width = "64")]

 use crate::structures::paging::{
-    frame::PhysFrame, mapper::*, page_table::PageTable, Page, PageTableFlags,
+    frame::PhysFrame, mapper::*, page::PageRangeInclusive, page_table::PageTable, FrameDeallocator,
+    Page, PageTableFlags,
 };

 /// A Mapper implementation that requires that the complete physical memory is mapped at some
@@ -33,7 +34,7 @@ impl<'a> OffsetPageTable<'a> {
             offset: phys_offset,
         };
         Self {
-            inner: MappedPageTable::new(level_4_table, phys_offset),
+            inner: unsafe { MappedPageTable::new(level_4_table, phys_offset) },
         }
     }

@@ -75,8 +76,10 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
     where
         A: FrameAllocator<Size4KiB> + ?Sized,
     {
-        self.inner
-            .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        unsafe {
+            self.inner
+                .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        }
     }

     #[inline]
@@ -93,7 +96,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
         page: Page<Size1GiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
-        self.inner.update_flags(page, flags)
+        unsafe { self.inner.update_flags(page, flags) }
     }

     #[inline]
@@ -102,7 +105,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
         page: Page<Size1GiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p4_entry(page, flags)
+        unsafe { self.inner.set_flags_p4_entry(page, flags) }
     }

     #[inline]
@@ -111,7 +114,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
         page: Page<Size1GiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p3_entry(page, flags)
+        unsafe { self.inner.set_flags_p3_entry(page, flags) }
     }

     #[inline]
@@ -120,7 +123,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
         page: Page<Size1GiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p2_entry(page, flags)
+        unsafe { self.inner.set_flags_p2_entry(page, flags) }
     }

     #[inline]
@@ -142,8 +145,10 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
     where
         A: FrameAllocator<Size4KiB> + ?Sized,
     {
-        self.inner
-            .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        unsafe {
+            self.inner
+                .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        }
     }

     #[inline]
@@ -160,7 +165,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
         page: Page<Size2MiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
-        self.inner.update_flags(page, flags)
+        unsafe { self.inner.update_flags(page, flags) }
     }

     #[inline]
@@ -169,7 +174,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
         page: Page<Size2MiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p4_entry(page, flags)
+        unsafe { self.inner.set_flags_p4_entry(page, flags) }
     }

     #[inline]
@@ -178,7 +183,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
         page: Page<Size2MiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p3_entry(page, flags)
+        unsafe { self.inner.set_flags_p3_entry(page, flags) }
     }

     #[inline]
@@ -187,7 +192,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
         page: Page<Size2MiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p2_entry(page, flags)
+        unsafe { self.inner.set_flags_p2_entry(page, flags) }
     }

     #[inline]
@@ -209,8 +214,10 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
     where
         A: FrameAllocator<Size4KiB> + ?Sized,
     {
-        self.inner
-            .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        unsafe {
+            self.inner
+                .map_to_with_table_flags(page, frame, flags, parent_table_flags, allocator)
+        }
     }

     #[inline]
@@ -227,7 +234,7 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
         page: Page<Size4KiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
-        self.inner.update_flags(page, flags)
+        unsafe { self.inner.update_flags(page, flags) }
     }

     #[inline]
@@ -236,7 +243,7 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
         page: Page<Size4KiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p4_entry(page, flags)
+        unsafe { self.inner.set_flags_p4_entry(page, flags) }
     }

     #[inline]
@@ -245,7 +252,7 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
         page: Page<Size4KiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p3_entry(page, flags)
+        unsafe { self.inner.set_flags_p3_entry(page, flags) }
     }

     #[inline]
@@ -254,7 +261,7 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
         page: Page<Size4KiB>,
         flags: PageTableFlags,
     ) -> Result<MapperFlushAll, FlagUpdateError> {
-        self.inner.set_flags_p2_entry(page, flags)
+        unsafe { self.inner.set_flags_p2_entry(page, flags) }
     }

     #[inline]
@@ -269,3 +276,24 @@ impl<'a> Translate for OffsetPageTable<'a> {
         self.inner.translate(addr)
     }
 }
+
+impl<'a> CleanUp for OffsetPageTable<'a> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe { self.inner.clean_up(frame_deallocator) }
+    }
+
+    #[inline]
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe { self.inner.clean_up_addr_range(range, frame_deallocator) }
+    }
+}
diff --git a/src/structures/paging/mapper/recursive_page_table.rs b/src/structures/paging/mapper/recursive_page_table.rs
index bb6df2cfe..af8af5af2 100644
--- a/src/structures/paging/mapper/recursive_page_table.rs
+++ b/src/structures/paging/mapper/recursive_page_table.rs
@@ -4,12 +4,12 @@ use core::fmt;

 use super::*;
 use crate::registers::control::Cr3;
-use crate::structures::paging::PageTableIndex;
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::{
     frame_alloc::FrameAllocator,
-    page::{AddressNotAligned, NotGiantPageSize},
+    page::{AddressNotAligned, NotGiantPageSize, PageRangeInclusive},
     page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
-    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
+    FrameDeallocator, Page, PageSize, PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::VirtAddr;

@@ -339,8 +339,6 @@ impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
         Ok((frame, MapperFlush::new(page)))
     }

-    // allow unused_unsafe until https://github.com/rust-lang/rfcs/pull/2585 lands
-    #[allow(unused_unsafe)]
     unsafe fn update_flags(
         &mut self,
         page: Page<Size1GiB>,
@@ -467,8 +465,6 @@ impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
         Ok((frame, MapperFlush::new(page)))
     }

-    // allow unused_unsafe until https://github.com/rust-lang/rfcs/pull/2585 lands
-    #[allow(unused_unsafe)]
     unsafe fn update_flags(
         &mut self,
         page: Page<Size2MiB>,
@@ -526,7 +522,7 @@ impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
             return Err(FlagUpdateError::PageNotMapped);
         }

-        let p3 = &mut *(p3_ptr(page, self.recursive_index));
+        let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
         let p3_entry = &mut p3[page.p3_index()];

         if p3_entry.is_unused() {
@@ -625,8 +621,6 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
         Ok((frame, MapperFlush::new(page)))
     }

-    // allow unused_unsafe until https://github.com/rust-lang/rfcs/pull/2585 lands
-    #[allow(unused_unsafe)]
     unsafe fn update_flags(
         &mut self,
         page: Page<Size4KiB>,
@@ -689,7 +683,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
             return Err(FlagUpdateError::PageNotMapped);
         }

-        let p3 = &mut *(p3_ptr(page, self.recursive_index));
+        let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
         let p3_entry = &mut p3[page.p3_index()];

         if p3_entry.is_unused() {
@@ -712,13 +706,13 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
             return Err(FlagUpdateError::PageNotMapped);
         }

-        let p3 = &mut *(p3_ptr(page, self.recursive_index));
+        let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };

         if p3[page.p3_index()].is_unused() {
             return Err(FlagUpdateError::PageNotMapped);
         }

-        let p2 = &mut *(p2_ptr(page, self.recursive_index));
+        let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
         let p2_entry = &mut p2[page.p2_index()];

         if p2_entry.is_unused() {
@@ -834,6 +828,99 @@ impl<'a> Translate for RecursivePageTable<'a> {
     }
 }

+impl<'a> CleanUp for RecursivePageTable<'a> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe {
+            self.clean_up_addr_range(
+                PageRangeInclusive {
+                    start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
+                    end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
+                },
+                frame_deallocator,
+            )
+        }
+    }
+
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        fn clean_up(
+            recursive_index: PageTableIndex,
+            page_table: &mut PageTable,
+            level: PageTableLevel,
+            range: PageRangeInclusive,
+            frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
+        ) -> bool {
+            if range.is_empty() {
+                return false;
+            }
+
+            let table_addr = range
+                .start
+                .start_address()
+                .align_down(level.table_address_space_alignment());
+
+            let start = range.start.page_table_index(level);
+            let end = range.end.page_table_index(level);
+
+            if let Some(next_level) = level.next_lower_level() {
+                let offset_per_entry = level.entry_address_space_alignment();
+                for (i, entry) in page_table
+                    .iter_mut()
+                    .enumerate()
+                    .take(usize::from(end) + 1)
+                    .skip(usize::from(start))
+                    .filter(|(i, _)| {
+                        !(level == PageTableLevel::Four && *i == recursive_index.into())
+                    })
+                {
+                    if let Ok(frame) = entry.frame() {
+                        let start = table_addr + (offset_per_entry * (i as u64));
+                        let end = start + (offset_per_entry - 1);
+                        let start = Page::<Size4KiB>::containing_address(start);
+                        let start = start.max(range.start);
+                        let end = Page::<Size4KiB>::containing_address(end);
+                        let end = end.min(range.end);
+                        let page_table =
+                            [p1_ptr, p2_ptr, p3_ptr][level as usize - 2](start, recursive_index);
+                        let page_table = unsafe { &mut *page_table };
+                        if clean_up(
+                            recursive_index,
+                            page_table,
+                            next_level,
+                            Page::range_inclusive(start, end),
+                            frame_deallocator,
+                        ) {
+                            entry.set_unused();
+                            unsafe {
+                                frame_deallocator.deallocate_frame(frame);
+                            }
+                        }
+                    }
+                }
+            }
+
+            page_table.iter().all(PageTableEntry::is_unused)
+        }
+
+        clean_up(
+            self.recursive_index,
+            self.level_4_table_mut(),
+            PageTableLevel::Four,
+            range,
+            frame_deallocator,
+        );
+    }
+}
+
 /// The given page table was not suitable to create a `RecursivePageTable`.
 #[derive(Debug)]
 pub enum InvalidPageTable {
diff --git a/src/structures/paging/page.rs b/src/structures/paging/page.rs
index 1169946a5..4a7a9f650 100644
--- a/src/structures/paging/page.rs
+++ b/src/structures/paging/page.rs
@@ -1,5 +1,6 @@
 //! Abstractions for default-sized and huge virtual memory pages.

+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::PageTableIndex;
 use crate::VirtAddr;
 use core::fmt;
@@ -52,7 +53,7 @@ impl PageSize for Size1GiB {
 }

 /// A virtual memory page.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 #[repr(C)]
 pub struct Page<S: PageSize = Size4KiB> {
     start_address: VirtAddr,
@@ -130,6 +131,14 @@ impl<S: PageSize> Page<S> {
         }
     }

+    const_fn! {
+        /// Returns the table index of this page at the specified level.
+        #[inline]
+        pub fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
+            self.start_address().page_table_index(level)
+        }
+    }
+
     const_fn! {
         /// Returns a range of pages, exclusive `end`.
         #[inline]
@@ -266,7 +275,7 @@ impl<S: PageSize> Sub<Self> for Page<S> {
 }

 /// A range of pages with exclusive upper bound.
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
 #[repr(C)]
 pub struct PageRange<S: PageSize = Size4KiB> {
     /// The start of the range, inclusive.
@@ -319,7 +328,7 @@ impl<S: PageSize> fmt::Debug for PageRange<S> {
 }

 /// A range of pages with inclusive upper bound.
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
 #[repr(C)]
 pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
     /// The start of the range, inclusive.
diff --git a/src/structures/paging/page_table.rs b/src/structures/paging/page_table.rs
index ac83a2c52..5ae35298e 100644
--- a/src/structures/paging/page_table.rs
+++ b/src/structures/paging/page_table.rs
@@ -268,7 +268,7 @@ impl fmt::Debug for PageTable {
 /// Can be used to select one of the 512 entries of a page table.
 ///
 /// Guaranteed to only ever contain 0..512.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PageTableIndex(u16);

 impl PageTableIndex {
@@ -319,7 +319,7 @@ impl From<PageTableIndex> for usize {
 /// This type is returned by the `VirtAddr::page_offset` method.
 ///
 /// Guaranteed to only ever contain 0..4096.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PageOffset(u16);

 impl PageOffset {
@@ -364,3 +364,38 @@ impl From<PageOffset> for usize {
     usize::from(offset.0)
 }
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+/// A value between 1 and 4.
+pub enum PageTableLevel {
+    /// Represents the level for a page table.
+    One = 1,
+    /// Represents the level for a page directory.
+    Two,
+    /// Represents the level for a page-directory pointer.
+    Three,
+    /// Represents the level for a page-map level-4.
+    Four,
+}
+
+impl PageTableLevel {
+    /// Returns the next lower level or `None` for level 1
+    pub const fn next_lower_level(self) -> Option<Self> {
+        match self {
+            PageTableLevel::Four => Some(PageTableLevel::Three),
+            PageTableLevel::Three => Some(PageTableLevel::Two),
+            PageTableLevel::Two => Some(PageTableLevel::One),
+            PageTableLevel::One => None,
+        }
+    }
+
+    /// Returns the alignment for the address space described by a table of this level.
+    pub const fn table_address_space_alignment(self) -> u64 {
+        1u64 << (self as u8 * 9 + 12)
+    }
+
+    /// Returns the alignment for the address space described by an entry in a table of this level.
+    pub const fn entry_address_space_alignment(self) -> u64 {
+        1u64 << (((self as u8 - 1) * 9) + 12)
+    }
+}
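To make the `PageTableLevel` arithmetic above concrete: each level multiplies the region covered per entry by 512 (9 address bits), starting from 4 KiB at level 1. A host-side sketch, assuming `PageTableLevel` is reachable under `structures::paging::page_table` as in the diff and that `VirtAddr::page_table_index` exists as implied by the addr.rs import change in this release:

```rust
use x86_64::VirtAddr;
use x86_64::structures::paging::page_table::PageTableLevel;

fn main() {
    // A level-4 table spans the whole 48-bit address space (1 << 48 bytes),
    // and each of its 512 entries covers a 512 GiB region (1 << 39 bytes).
    assert_eq!(PageTableLevel::Four.table_address_space_alignment(), 1 << 48);
    assert_eq!(PageTableLevel::Four.entry_address_space_alignment(), 1 << 39);

    // At level 1 each entry maps a single 4 KiB page.
    assert_eq!(PageTableLevel::One.entry_address_space_alignment(), 4096);

    // The new `page_table_index` helper extracts the 9-bit index a given
    // address uses at each level of the hierarchy.
    let addr = VirtAddr::new(0xdead_beef);
    let mut level = Some(PageTableLevel::Four);
    while let Some(l) = level {
        let idx = addr.page_table_index(l);
        println!("level {:?}: index {}", l, u16::from(idx));
        level = l.next_lower_level();
    }
}
```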