Skip to content

Use core::ops::Range and core::ops::RangeInclusive instead of custom range structs #517

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 46 commits into from
Closed
Changes from all commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
a9c3dc8
fix indentation in doc comments
Freax13 May 24, 2024
a98580e
fix warnings in tests
Freax13 May 24, 2024
9a062df
Merge pull request #488 from Freax13/fix_indentation
phil-opp May 24, 2024
3eaf06f
add private const variants for PhysAddr functions
Freax13 Jun 9, 2024
c5bc9fc
constify PhysFrame functions
Freax13 Jun 9, 2024
24691eb
Merge pull request #489 from rust-osdev/feature/const-phys-frame
phil-opp Jun 10, 2024
b0843da
Ensure that Page actually implements Hash
Wasabi375 Jul 4, 2024
2d3bd56
Merge pull request #490 from Wasabi375/zst_hash
Freax13 Jul 4, 2024
3fec974
don't use label starting with `1`
Freax13 Jul 14, 2024
1d8a69c
Merge pull request #492 from rust-osdev/fix/nightly-2024-07-14
phil-opp Jul 22, 2024
ae65fb6
Add size and len for PageRange, PhysFrameRange etc
Wasabi375 Jul 14, 2024
a59bf9f
Merge pull request #491 from Wasabi375/ranges
Freax13 Jul 22, 2024
b349961
fix warning in integration test
Freax13 Jul 31, 2024
df502ff
Merge pull request #495 from rust-osdev/fix-unsafe-warning
phil-opp Jul 31, 2024
9dca072
remove #![feature(asm_const)]
Freax13 Aug 15, 2024
7b47163
Merge pull request #496 from rust-osdev/fix/nightyly-2024-08-15
Freax13 Aug 15, 2024
d3e28e9
Remove stabilized `const_mut_refs` feature
phil-opp Oct 7, 2024
eec5d25
Break long doc paragraph
phil-opp Oct 7, 2024
1829dc1
Elide explicit lifetimes when possible
phil-opp Oct 7, 2024
8adb26d
Merge pull request #501 from rust-osdev/remove-const_mut_refs-feature
Freax13 Oct 8, 2024
323d46c
Merge pull request #502 from rust-osdev/fix-clippy-warnings
Freax13 Oct 8, 2024
0b5476e
gate HandlerFunc behind target_arch = "x86{_64}"
Freax13 Oct 21, 2024
4bd1973
fix field order for INVPCID descriptor
Freax13 Nov 3, 2024
f85f015
Merge pull request #508 from rust-osdev/fix/pcid-order
Freax13 Nov 10, 2024
e045300
fix CI job for building on MSRV
Freax13 Nov 12, 2024
def4212
only make append & push const on Rust 1.83+
Freax13 Nov 12, 2024
3fc9106
Merge pull request #510 from rust-osdev/fix/ci-msrv
Freax13 Nov 13, 2024
7525088
Merge pull request #507 from rust-osdev/fix/gate-handlers
Freax13 Nov 15, 2024
199d614
fix typo in "InvPicdCommand"
Freax13 Nov 10, 2024
3943178
Merge pull request #509 from rust-osdev/fix/inv-picd-command
Freax13 Nov 15, 2024
6e0652f
TryFrom implementation for ExceptionVector
mrjbom Oct 20, 2024
08e0172
Merge pull request #506 from mrjbom/TryFrom_for_ExceptionVector
Freax13 Nov 16, 2024
87d1eb1
More precise comments in TaskStateSegment
mrjbom Oct 19, 2024
7011547
More precise comments in TaskStateSegment
mrjbom Oct 21, 2024
a9187de
More precise comments in TaskStateSegment
mrjbom Oct 21, 2024
6d94a46
More precise comments in TaskStateSegment
mrjbom Oct 21, 2024
dc222cb
Merge pull request #504 from mrjbom/master
Freax13 Nov 16, 2024
f9c352b
Minor clarification
mrjbom Oct 19, 2024
e016a4c
Merge pull request #503 from mrjbom/mrjbom-gdt-patch-1
Freax13 Nov 16, 2024
b9e3d25
fix signature of Step::steps_between implementations
Freax13 Nov 24, 2024
3d65714
actually test with slice instead of array
Freax13 Nov 24, 2024
1e5b5a1
fix tests on 32-bit platforms
Freax13 Nov 24, 2024
8fb0668
run tests on a 32-bit platform
Freax13 Nov 24, 2024
b4b6663
fix Page Step impl on 32-bit platforms
Freax13 Nov 24, 2024
13f1450
Use core::ops::Range where possible
ChocolateLoverRaj Nov 28, 2024
2f0d9de
Add iter_pages() for use without Step trait
ChocolateLoverRaj Nov 30, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
@@ -26,6 +26,9 @@ jobs:
- nightly
- 1.59
runs-on: ubuntu-latest
env:
# rustup prioritizes environment variables over rust-toolchain.toml files.
RUSTUP_TOOLCHAIN: ${{ matrix.rust }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
@@ -53,7 +56,7 @@ jobs:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
with:
targets: x86_64-unknown-linux-musl, i686-unknown-linux-gnu, thumbv7em-none-eabihf
targets: x86_64-unknown-linux-musl, i686-unknown-linux-musl, thumbv7em-none-eabihf

- run: cargo build

@@ -69,9 +72,12 @@ jobs:

- name: "Build on non x86_64 platforms"
run: |
cargo build --target i686-unknown-linux-gnu --no-default-features --features nightly
cargo build --target i686-unknown-linux-musl --no-default-features --features nightly
cargo build --target thumbv7em-none-eabihf --no-default-features --features nightly
- run: cargo test --target i686-unknown-linux-musl --no-default-features --features nightly
if: runner.os == 'Linux'

bootloader-test:
name: "Bootloader Integration Test"

3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -28,8 +28,9 @@ rustversion = "1.0.5"
[features]
default = ["nightly", "instructions"]
instructions = []
nightly = [ "const_fn", "step_trait", "abi_x86_interrupt", "asm_const" ]
nightly = ["const_fn", "step_trait", "abi_x86_interrupt", "asm_const"]
abi_x86_interrupt = []
# deprecated, no longer needed
const_fn = []
asm_const = []
step_trait = []
4 changes: 4 additions & 0 deletions Changelog.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Unreleased

## New Features

- [add `size` and `len` for `PageRange`, `PhysFrameRange`, `PageRangeInclusive` and `PhysFrameRangeInclusive`](https://github.com/rust-osdev/x86_64/pull/491)

# 0.15.1 – 2024-03-19

## New Features
150 changes: 111 additions & 39 deletions src/addr.rs
Original file line number Diff line number Diff line change
@@ -240,25 +240,43 @@ impl VirtAddr {
}

// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
#[cfg(feature = "step_trait")]
pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> (usize, Option<usize>) {
if let Some(steps) = Self::steps_between_u64(start, end) {
let steps = usize::try_from(steps).ok();
(steps.unwrap_or(usize::MAX), steps)
} else {
(0, None)
}
}

/// An implementation of steps_between that returns u64. Note that this
/// function always returns the exact bound, so it doesn't need to return a
/// lower and upper bound like steps_between does.
#[cfg(any(feature = "instructions", feature = "step_trait"))]
pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> Option<usize> {
pub(crate) fn steps_between_u64(start: &Self, end: &Self) -> Option<u64> {
let mut steps = end.0.checked_sub(start.0)?;

// Mask away extra bits that appear while jumping the gap.
steps &= 0xffff_ffff_ffff;

usize::try_from(steps).ok()
Some(steps)
}

// FIXME: Move this into the `Step` impl, once `Step` is stabilized.
#[inline]
pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
let offset = u64::try_from(count).ok()?;
if offset > ADDRESS_SPACE_SIZE {
Self::forward_checked_u64(start, u64::try_from(count).ok()?)
}

/// An implementation of forward_checked that takes u64 instead of usize.
#[inline]
pub(crate) fn forward_checked_u64(start: Self, count: u64) -> Option<Self> {
if count > ADDRESS_SPACE_SIZE {
return None;
}

let mut addr = start.0.checked_add(offset)?;
let mut addr = start.0.checked_add(count)?;

match addr.get_bits(47..) {
0x1 => {
@@ -274,6 +292,31 @@ impl VirtAddr {

Some(unsafe { Self::new_unsafe(addr) })
}

/// An implementation of backward_checked that takes u64 instead of usize.
#[cfg(feature = "step_trait")]
#[inline]
pub(crate) fn backward_checked_u64(start: Self, count: u64) -> Option<Self> {
if count > ADDRESS_SPACE_SIZE {
return None;
}

let mut addr = start.0.checked_sub(count)?;

match addr.get_bits(47..) {
0x1fffe => {
// Jump the gap by sign extending the 47th bit.
addr.set_bits(47.., 0);
}
0x1fffd => {
// Address underflow
return None;
}
_ => {}
}

Some(unsafe { Self::new_unsafe(addr) })
}
}

impl fmt::Debug for VirtAddr {
@@ -360,7 +403,7 @@ impl Sub<VirtAddr> for VirtAddr {
#[cfg(feature = "step_trait")]
impl Step for VirtAddr {
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
Self::steps_between_impl(start, end)
}

@@ -371,26 +414,7 @@ impl Step for VirtAddr {

#[inline]
fn backward_checked(start: Self, count: usize) -> Option<Self> {
let offset = u64::try_from(count).ok()?;
if offset > ADDRESS_SPACE_SIZE {
return None;
}

let mut addr = start.0.checked_sub(offset)?;

match addr.get_bits(47..) {
0x1fffe => {
// Jump the gap by sign extending the 47th bit.
addr.set_bits(47.., 0);
}
0x1fffd => {
// Address underflow
return None;
}
_ => {}
}

Some(unsafe { Self::new_unsafe(addr) })
Self::backward_checked_u64(start, u64::try_from(count).ok()?)
}
}

@@ -495,7 +519,15 @@ impl PhysAddr {
where
U: Into<u64>,
{
PhysAddr(align_down(self.0, align.into()))
self.align_down_u64(align.into())
}

/// Aligns the physical address downwards to the given alignment.
///
/// See the `align_down` function for more information.
#[inline]
pub(crate) const fn align_down_u64(self, align: u64) -> Self {
PhysAddr(align_down(self.0, align))
}

/// Checks whether the physical address has the demanded alignment.
@@ -504,7 +536,13 @@ impl PhysAddr {
where
U: Into<u64>,
{
self.align_down(align) == self
self.is_aligned_u64(align.into())
}

/// Checks whether the physical address has the demanded alignment.
#[inline]
pub(crate) const fn is_aligned_u64(self, align: u64) -> bool {
self.align_down_u64(align).as_u64() == self.as_u64()
}
}

@@ -650,22 +688,27 @@ mod tests {
Step::forward_checked(VirtAddr(0xffff_ffff_ffff_ffff), 1),
None
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x1234_5678_9abd),
VirtAddr(0xffff_9234_5678_9abc)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0000),
VirtAddr(0xffff_ffff_ffff_ffff)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::forward(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_00ff),
VirtAddr(0xffff_ffff_ffff_ffff)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::forward_checked(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_0100),
None
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::forward_checked(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0001),
None
@@ -686,18 +729,22 @@ mod tests {
Step::backward(VirtAddr(0xffff_8000_0000_0001), 1),
VirtAddr(0xffff_8000_0000_0000)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::backward(VirtAddr(0xffff_9234_5678_9abc), 0x1234_5678_9abd),
VirtAddr(0x7fff_ffff_ffff)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0000),
VirtAddr(0)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x7fff_ffff_ff01),
VirtAddr(0xff)
);
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::backward_checked(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0001),
None
@@ -707,43 +754,64 @@ mod tests {
#[test]
#[cfg(feature = "step_trait")]
fn virtaddr_steps_between() {
assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(0)), Some(0));
assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(1)), Some(1));
assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), None);
assert_eq!(
Step::steps_between(&VirtAddr(0), &VirtAddr(0)),
(0, Some(0))
);
assert_eq!(
Step::steps_between(&VirtAddr(0), &VirtAddr(1)),
(1, Some(1))
);
assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), (0, None));
assert_eq!(
Step::steps_between(
&VirtAddr(0x7fff_ffff_ffff),
&VirtAddr(0xffff_8000_0000_0000)
),
Some(1)
(1, Some(1))
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0x7fff_ffff_ffff)
),
None
(0, None)
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0xffff_8000_0000_0000)
),
Some(0)
(0, Some(0))
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0000),
&VirtAddr(0xffff_8000_0000_0001)
),
Some(1)
(1, Some(1))
);
assert_eq!(
Step::steps_between(
&VirtAddr(0xffff_8000_0000_0001),
&VirtAddr(0xffff_8000_0000_0000)
),
None
(0, None)
);
// Make sure that we handle `steps > u32::MAX` correctly on 32-bit
// targets. On 64-bit targets, `0x1_0000_0000` fits into `usize`, so we
// can return exact lower and upper bounds. On 32-bit targets,
// `0x1_0000_0000` doesn't fit into `usize`, so we only return a lower
// bound of `usize::MAX` and don't return an upper bound.
#[cfg(target_pointer_width = "64")]
assert_eq!(
Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
(0x1_0000_0000, Some(0x1_0000_0000))
);
#[cfg(not(target_pointer_width = "64"))]
assert_eq!(
Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
(usize::MAX, None)
);
}

@@ -795,10 +863,14 @@ mod tests {
}

#[test]
#[cfg(target_pointer_width = "64")]
fn test_from_ptr_array() {
let slice = &[1, 2, 3, 4, 5];
// Make sure that from_ptr(slice) is the address of the first element
assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
assert_eq!(
VirtAddr::from_ptr(slice.as_slice()),
VirtAddr::from_ptr(&slice[0])
);
}
}

@@ -937,7 +1009,7 @@ mod proofs {
};

// ...then `steps_between` succeeds as well.
assert!(Step::steps_between(&start, &end) == Some(count));
assert!(Step::steps_between(&start, &end) == (count, Some(count)));
}

// This harness proves that for all inputs for which `steps_between`
@@ -954,7 +1026,7 @@ mod proofs {
};

// If `steps_between` succeeds...
let Some(count) = Step::steps_between(&start, &end) else {
let Some(count) = Step::steps_between(&start, &end).1 else {
return;
};

7 changes: 5 additions & 2 deletions src/instructions/mod.rs
Original file line number Diff line number Diff line change
@@ -32,8 +32,11 @@ pub fn nop() {
}
}

/// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)' instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
/// emulator. Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
/// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)'
/// instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
/// emulator.
///
/// Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
#[inline]
pub fn bochs_breakpoint() {
unsafe {
4 changes: 2 additions & 2 deletions src/instructions/segmentation.rs
Original file line number Diff line number Diff line change
@@ -75,10 +75,10 @@ impl Segment for CS {
unsafe {
asm!(
"push {sel}",
"lea {tmp}, [1f + rip]",
"lea {tmp}, [55f + rip]",
"push {tmp}",
"retfq",
"1:",
"55:",
sel = in(reg) u64::from(sel.0),
tmp = lateout(reg) _,
options(preserves_flags),
40 changes: 21 additions & 19 deletions src/instructions/tlb.rs
Original file line number Diff line number Diff line change
@@ -4,13 +4,10 @@ use bit_field::BitField;

use crate::{
instructions::segmentation::{Segment, CS},
structures::paging::{
page::{NotGiantPageSize, PageRange},
Page, PageSize, Size2MiB, Size4KiB,
},
structures::paging::{page::NotGiantPageSize, Page, PageSize, Size2MiB, Size4KiB},
PrivilegeLevel, VirtAddr,
};
use core::{arch::asm, cmp, convert::TryFrom, fmt};
use core::{arch::asm, cmp, convert::TryFrom, fmt, ops::Range};

/// Invalidate the given address in the TLB using the `invlpg` instruction.
#[inline]
@@ -30,7 +27,7 @@ pub fn flush_all() {

/// The Invalidate PCID Command to execute.
#[derive(Debug)]
pub enum InvPicdCommand {
pub enum InvPcidCommand {
/// The logical processor invalidates mappings—except global translations—for the linear address and PCID specified.
Address(VirtAddr, Pcid),

@@ -44,13 +41,18 @@ pub enum InvPicdCommand {
AllExceptGlobal,
}

// TODO: Remove this in the next breaking release.
#[deprecated = "please use `InvPcidCommand` instead"]
#[doc(hidden)]
pub type InvPicdCommand = InvPcidCommand;

/// The INVPCID descriptor comprises 128 bits and consists of a PCID and a linear address.
/// For INVPCID type 0, the processor uses the full 64 bits of the linear address even outside 64-bit mode; the linear address is not used for other INVPCID types.
#[repr(C)]
#[derive(Debug)]
struct InvpcidDescriptor {
address: u64,
pcid: u64,
address: u64,
}

/// Structure of a PCID. A PCID has to be <= 4096 for x86_64.
@@ -93,25 +95,25 @@ impl fmt::Display for PcidTooBig {
///
/// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
#[inline]
pub unsafe fn flush_pcid(command: InvPicdCommand) {
pub unsafe fn flush_pcid(command: InvPcidCommand) {
let mut desc = InvpcidDescriptor {
address: 0,
pcid: 0,
address: 0,
};

let kind: u64;
match command {
InvPicdCommand::Address(addr, pcid) => {
InvPcidCommand::Address(addr, pcid) => {
kind = 0;
desc.pcid = pcid.value().into();
desc.address = addr.as_u64()
}
InvPicdCommand::Single(pcid) => {
InvPcidCommand::Single(pcid) => {
kind = 1;
desc.pcid = pcid.0.into()
}
InvPicdCommand::All => kind = 2,
InvPicdCommand::AllExceptGlobal => kind = 3,
InvPcidCommand::All => kind = 2,
InvPcidCommand::AllExceptGlobal => kind = 3,
}

unsafe {
@@ -131,7 +133,7 @@ pub unsafe fn flush_pcid(command: InvPicdCommand) {
///
/// // Broadcast flushing some pages to all logical processors.
/// let start: Page = Page::from_start_address(VirtAddr::new(0xf000_0000)).unwrap();
/// let pages = Page::range(start, start + 3);
/// let pages = start..start + 3;
/// invlpgb.build().pages(pages).include_global().flush();
///
/// // Wait for all logical processors to respond.
@@ -223,7 +225,7 @@ where
S: NotGiantPageSize,
{
invlpgb: &'a Invlpgb,
page_range: Option<PageRange<S>>,
page_range: Option<Range<Page<S>>>,
pcid: Option<Pcid>,
asid: Option<u16>,
include_global: bool,
@@ -239,7 +241,7 @@ where
///
/// If the range doesn't fit within `invlpgb_count_max`, `invlpgb` is
/// executed multiple times.
pub fn pages<T>(self, page_range: PageRange<T>) -> InvlpgbFlushBuilder<'a, T>
pub fn pages<T>(self, page_range: Range<Page<T>>) -> InvlpgbFlushBuilder<'a, T>
where
T: NotGiantPageSize,
{
@@ -307,17 +309,17 @@ where

/// Execute the flush.
pub fn flush(&self) {
if let Some(mut pages) = self.page_range {
if let Some(mut pages) = self.page_range.clone() {
while !pages.is_empty() {
// Calculate out how many pages we still need to flush.
let count = Page::<S>::steps_between_impl(&pages.start, &pages.end).unwrap();
let count = Page::<S>::steps_between_impl(&pages.start, &pages.end).0;

// Make sure that we never jump the gap in the address space when flushing.
let second_half_start =
Page::<S>::containing_address(VirtAddr::new(0xffff_8000_0000_0000));
let count = if pages.start < second_half_start {
let count_to_second_half =
Page::steps_between_impl(&pages.start, &second_half_start).unwrap();
Page::steps_between_impl(&pages.start, &second_half_start).0;
cmp::min(count, count_to_second_half)
} else {
count
2 changes: 0 additions & 2 deletions src/lib.rs
Original file line number Diff line number Diff line change
@@ -2,8 +2,6 @@
//! and access to various system registers.
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT::append()
#![cfg_attr(feature = "asm_const", feature(asm_const))]
#![cfg_attr(feature = "abi_x86_interrupt", feature(abi_x86_interrupt))]
#![cfg_attr(feature = "step_trait", feature(step_trait))]
#![cfg_attr(feature = "doc_auto_cfg", feature(doc_auto_cfg))]
16 changes: 8 additions & 8 deletions src/registers/model_specific.rs
Original file line number Diff line number Diff line change
@@ -357,11 +357,11 @@ mod x86_64 {
///
/// # Returns
/// - Field 1 (SYSRET): The CS selector is set to this field + 16. SS.Sel is set to
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// - Field 2 (SYSCALL): This field is copied directly into CS.Sel. SS.Sel is set to
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
#[inline]
pub fn read_raw() -> (u16, u16) {
let msr_value = unsafe { Self::MSR.read() };
@@ -398,11 +398,11 @@ mod x86_64 {
///
/// # Parameters
/// - sysret: The CS selector is set to this field + 16. SS.Sel is set to
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// this field + 8. Because SYSRET always returns to CPL 3, the
/// RPL bits 1:0 should be initialized to 11b.
/// - syscall: This field is copied directly into CS.Sel. SS.Sel is set to
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
/// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
/// 33:32 should be initialized to 00b.
///
/// # Safety
///
4 changes: 2 additions & 2 deletions src/structures/gdt.rs
Original file line number Diff line number Diff line change
@@ -193,7 +193,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
///
/// Panics if the GDT doesn't have enough free entries.
#[inline]
#[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
#[rustversion::attr(since(1.83), const)]
pub fn append(&mut self, entry: Descriptor) -> SegmentSelector {
let index = match entry {
Descriptor::UserSegment(value) => {
@@ -246,7 +246,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
}

#[inline]
#[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
#[rustversion::attr(since(1.83), const)]
fn push(&mut self, value: u64) -> usize {
let index = self.len;
self.table[index] = Entry::new(value);
114 changes: 96 additions & 18 deletions src/structures/idt.rs
Original file line number Diff line number Diff line change
@@ -24,6 +24,7 @@ use crate::registers::rflags::RFlags;
use crate::{PrivilegeLevel, VirtAddr};
use bit_field::BitField;
use bitflags::bitflags;
use core::convert::TryFrom;
use core::fmt;
use core::marker::PhantomData;
use core::ops::Bound::{Excluded, Included, Unbounded};
@@ -153,9 +154,9 @@ pub struct InterruptDescriptorTable {
/// is enabled.
/// - Execution of any legacy SSE instruction when `CR4.OSFXSR` is cleared to 0.
/// - Execution of any SSE instruction (uses `YMM`/`XMM` registers), or 64-bit media
/// instruction (uses `MMXTM` registers) when `CR0.EM` = 1.
/// instruction (uses `MMXTM` registers) when `CR0.EM` = 1.
/// - Execution of any SSE floating-point instruction (uses `YMM`/`XMM` registers) that
/// causes a numeric exception when `CR4.OSXMMEXCPT` = 0.
/// causes a numeric exception when `CR4.OSXMMEXCPT` = 0.
/// - Use of the `DR4` or `DR5` debug registers when `CR4.DE` = 1.
/// - Execution of `RSM` when not in `SMM` mode.
///
@@ -503,7 +504,7 @@ impl InterruptDescriptorTable {
///
/// - `self` is never destroyed.
/// - `self` always stays at the same memory location. It is recommended to wrap it in
/// a `Box`.
/// a `Box`.
///
#[cfg(all(feature = "instructions", target_arch = "x86_64"))]
#[inline]
@@ -712,52 +713,82 @@ impl<T> PartialEq for Entry<T> {
/// A handler function for an interrupt or an exception without error code.
///
/// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
pub type HandlerFunc = extern "x86-interrupt" fn(InterruptStackFrame);
/// This type is not usable without the `abi_x86_interrupt` feature.
#[cfg(not(feature = "abi_x86_interrupt"))]
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
)))]
#[derive(Copy, Clone, Debug)]
pub struct HandlerFunc(());

/// A handler function for an exception that pushes an error code.
///
/// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
pub type HandlerFuncWithErrCode = extern "x86-interrupt" fn(InterruptStackFrame, error_code: u64);
/// This type is not usable without the `abi_x86_interrupt` feature.
#[cfg(not(feature = "abi_x86_interrupt"))]
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
)))]
#[derive(Copy, Clone, Debug)]
pub struct HandlerFuncWithErrCode(());

/// A page fault handler function that pushes a page fault error code.
///
/// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
pub type PageFaultHandlerFunc =
extern "x86-interrupt" fn(InterruptStackFrame, error_code: PageFaultErrorCode);
/// This type is not usable without the `abi_x86_interrupt` feature.
#[cfg(not(feature = "abi_x86_interrupt"))]
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
)))]
#[derive(Copy, Clone, Debug)]
pub struct PageFaultHandlerFunc(());

/// A handler function that must not return, e.g. for a machine check exception.
///
/// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
pub type DivergingHandlerFunc = extern "x86-interrupt" fn(InterruptStackFrame) -> !;
/// This type is not usable without the `abi_x86_interrupt` feature.
#[cfg(not(feature = "abi_x86_interrupt"))]
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
)))]
#[derive(Copy, Clone, Debug)]
pub struct DivergingHandlerFunc(());

/// A handler function with an error code that must not return, e.g. for a double fault exception.
///
/// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
pub type DivergingHandlerFuncWithErrCode =
extern "x86-interrupt" fn(InterruptStackFrame, error_code: u64) -> !;
/// This type is not usable without the `abi_x86_interrupt` feature.
#[cfg(not(feature = "abi_x86_interrupt"))]
#[cfg(not(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
)))]
#[derive(Copy, Clone, Debug)]
pub struct DivergingHandlerFuncWithErrCode(());

@@ -853,7 +884,10 @@ pub unsafe trait HandlerFuncType {

macro_rules! impl_handler_func_type {
($f:ty) => {
#[cfg(feature = "abi_x86_interrupt")]
#[cfg(all(
any(target_arch = "x86", target_arch = "x86_64"),
feature = "abi_x86_interrupt"
))]
unsafe impl HandlerFuncType for $f {
#[inline]
fn to_virt_addr(self) -> VirtAddr {
@@ -1328,6 +1362,52 @@ pub enum ExceptionVector {
Security = 0x1E,
}

/// Exception vector number is invalid
#[derive(Debug)]
pub struct InvalidExceptionVectorNumber(u8);

impl fmt::Display for InvalidExceptionVectorNumber {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} is not a valid exception vector", self.0)
}
}

impl TryFrom<u8> for ExceptionVector {
type Error = InvalidExceptionVectorNumber;

/// Tries to convert the exception vector number to [`ExceptionVector`]
///
/// Fails if the exception vector number is Coprocessor Segment Overrun, a reserved vector, or not a valid exception vector number
fn try_from(exception_vector_number: u8) -> Result<Self, Self::Error> {
match exception_vector_number {
0x00 => Ok(Self::Division),
0x01 => Ok(Self::Debug),
0x02 => Ok(Self::NonMaskableInterrupt),
0x03 => Ok(Self::Breakpoint),
0x04 => Ok(Self::Overflow),
0x05 => Ok(Self::BoundRange),
0x06 => Ok(Self::InvalidOpcode),
0x07 => Ok(Self::DeviceNotAvailable),
0x08 => Ok(Self::Double),
0x0A => Ok(Self::InvalidTss),
0x0B => Ok(Self::SegmentNotPresent),
0x0C => Ok(Self::Stack),
0x0D => Ok(Self::GeneralProtection),
0x0E => Ok(Self::Page),
0x10 => Ok(Self::X87FloatingPoint),
0x11 => Ok(Self::AlignmentCheck),
0x12 => Ok(Self::MachineCheck),
0x13 => Ok(Self::SimdFloatingPoint),
0x14 => Ok(Self::Virtualization),
0x15 => Ok(Self::ControlProtection),
0x1C => Ok(Self::HypervisorInjection),
0x1D => Ok(Self::VmmCommunication),
0x1E => Ok(Self::Security),
_ => Err(InvalidExceptionVectorNumber(exception_vector_number)),
}
}
}

#[cfg(all(
feature = "instructions",
feature = "abi_x86_interrupt",
@@ -1642,7 +1722,7 @@ mod test {

#[test]
fn entry_derive_test() {
fn foo(_: impl Clone + Copy + PartialEq + fmt::Debug) {}
fn foo(_: impl Copy + PartialEq + fmt::Debug) {}

foo(Entry::<HandlerFuncWithErrCode> {
pointer_low: 0,
@@ -1667,9 +1747,7 @@ mod test {
});

unsafe {
frame
.as_mut()
.update(|f| f.instruction_pointer = f.instruction_pointer + 2u64);
frame.as_mut().update(|f| f.instruction_pointer += 2u64);
}
}
}
2 changes: 1 addition & 1 deletion src/structures/mod.rs
Original file line number Diff line number Diff line change
@@ -15,7 +15,7 @@ pub mod tss;
#[derive(Debug, Clone, Copy)]
#[repr(C, packed(2))]
pub struct DescriptorTablePointer {
/// Size of the DT.
/// Size of the DT in bytes - 1.
pub limit: u16,
/// Pointer to the memory region containing the DT.
pub base: VirtAddr,
122 changes: 46 additions & 76 deletions src/structures/paging/frame.rs
Original file line number Diff line number Diff line change
@@ -5,7 +5,7 @@ use crate::structures::paging::page::{PageSize, Size4KiB};
use crate::PhysAddr;
use core::fmt;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use core::ops::{Add, AddAssign, Range, RangeInclusive, Sub, SubAssign};

/// A physical memory frame.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -21,8 +21,9 @@ impl<S: PageSize> PhysFrame<S> {
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
#[inline]
#[rustversion::attr(since(1.61), const)]
pub fn from_start_address(address: PhysAddr) -> Result<Self, AddressNotAligned> {
if !address.is_aligned(S::SIZE) {
if !address.is_aligned_u64(S::SIZE) {
return Err(AddressNotAligned);
}

@@ -46,9 +47,10 @@ impl<S: PageSize> PhysFrame<S> {

/// Returns the frame that contains the given physical address.
#[inline]
#[rustversion::attr(since(1.61), const)]
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.align_down(S::SIZE),
start_address: address.align_down_u64(S::SIZE),
size: PhantomData,
}
}
@@ -66,20 +68,6 @@ impl<S: PageSize> PhysFrame<S> {
pub fn size(self) -> u64 {
S::SIZE
}

/// Returns a range of frames, exclusive `end`.
#[inline]
#[rustversion::attr(since(1.61), const)]
pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}

/// Returns a range of frames, inclusive `end`.
#[inline]
#[rustversion::attr(since(1.61), const)]
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
}

impl<S: PageSize> fmt::Debug for PhysFrame<S> {
@@ -130,86 +118,68 @@ impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
}
}

/// A range of physical memory frames, exclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
/// Helper trait to get the number of frames in the range.
#[allow(clippy::len_without_is_empty)]
pub trait PhysFrameRangeLen {
/// Returns the number of frames in the range.
fn len(&self) -> u64;
}

impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
impl<S: PageSize> PhysFrameRangeLen for Range<PhysFrame<S>> {
#[inline]
pub fn is_empty(&self) -> bool {
self.start >= self.end
fn len(&self) -> u64 {
if !self.is_empty() {
self.end - self.start
} else {
0
}
}
}

impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;

impl<S: PageSize> PhysFrameRangeLen for RangeInclusive<PhysFrame<S>> {
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let frame = self.start;
self.start += 1;
Some(frame)
fn len(&self) -> u64 {
if !self.is_empty() {
*self.end() - *self.start() + 1
} else {
None
0
}
}
}

impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}

/// A range of physical memory frames, inclusive of the upper bound.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
/// Helper trait to get the size in bytes of all frames within the range.
pub trait PhysFrameRangeSize {
/// Returns the size in bytes of all frames within the range.
fn size(&self) -> u64;
}

impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
impl<S: PageSize> PhysFrameRangeSize for Range<PhysFrame<S>> {
#[inline]
pub fn is_empty(&self) -> bool {
self.start > self.end
fn size(&self) -> u64 {
S::SIZE * self.len()
}
}

impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;

impl<S: PageSize> PhysFrameRangeSize for RangeInclusive<PhysFrame<S>> {
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let frame = self.start;
self.start += 1;
Some(frame)
} else {
None
}
fn size(&self) -> u64 {
S::SIZE * self.len()
}
}

impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_frame_range_len() {
let start_addr = PhysAddr::new(0xdead_beaf);
let start = PhysFrame::<Size4KiB>::containing_address(start_addr);
let end = start + 50;

let range = start..end;
assert_eq!(range.len(), 50);

let range_inclusive = start..=end;
assert_eq!(range_inclusive.len(), 51);
}
}
32 changes: 15 additions & 17 deletions src/structures/paging/mapper/mapped_page_table.rs
Original file line number Diff line number Diff line change
@@ -150,7 +150,7 @@ impl<'a, P: PageTableFrameMapping> MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'a, P> {
impl<P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'_, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -258,7 +258,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'a, P> {
impl<P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'_, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -386,7 +386,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'a, P> {
impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -530,7 +530,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
impl<P: PageTableFrameMapping> Translate for MappedPageTable<'_, P> {
#[allow(clippy::inconsistent_digit_grouping)]
fn translate(&self, addr: VirtAddr) -> TranslateResult {
let p4 = &self.level_4_table;
@@ -594,26 +594,24 @@ impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
impl<P: PageTableFrameMapping> CleanUp for MappedPageTable<'_, P> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
unsafe {
self.clean_up_addr_range(
PageRangeInclusive {
start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
},
Page::from_start_address(VirtAddr::new(0)).unwrap()
..=Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
frame_deallocator,
)
}
}

unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
@@ -622,20 +620,20 @@ impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
page_table: &mut PageTable,
page_table_walker: &PageTableWalker<P>,
level: PageTableLevel,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) -> bool {
if range.is_empty() {
return false;
}

let table_addr = range
.start
.start()
.start_address()
.align_down(level.table_address_space_alignment());

let start = range.start.page_table_index(level);
let end = range.end.page_table_index(level);
let start = range.start().page_table_index(level);
let end = range.end().page_table_index(level);

if let Some(next_level) = level.next_lower_level() {
let offset_per_entry = level.entry_address_space_alignment();
@@ -653,15 +651,15 @@ impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
.unwrap();
let end = start + (offset_per_entry - 1);
let start = Page::<Size4KiB>::containing_address(start);
let start = start.max(range.start);
let start = start.max(*range.start());
let end = Page::<Size4KiB>::containing_address(end);
let end = end.min(range.end);
let end = end.min(*range.end());
unsafe {
if clean_up(
page_table,
page_table_walker,
next_level,
Page::range_inclusive(start, end),
start..=end,
frame_deallocator,
) {
let frame = entry.frame().unwrap();
10 changes: 4 additions & 6 deletions src/structures/paging/mapper/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
//! Abstractions for reading and modifying the mapping of pages.
use core::ops::RangeInclusive;

pub use self::mapped_page_table::{MappedPageTable, PageTableFrameMapping};
#[cfg(target_pointer_width = "64")]
pub use self::offset_page_table::OffsetPageTable;
@@ -8,7 +10,6 @@ pub use self::recursive_page_table::{InvalidPageTable, RecursivePageTable};

use crate::structures::paging::{
frame_alloc::{FrameAllocator, FrameDeallocator},
page::PageRangeInclusive,
page_table::PageTableFlags,
Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
};
@@ -512,10 +513,7 @@ pub trait CleanUp {
/// # }};
/// # unsafe fn test(page_table: &mut impl CleanUp, frame_deallocator: &mut impl FrameDeallocator<Size4KiB>) {
/// // clean up all page tables in the lower half of the address space
/// let lower_half = Page::range_inclusive(
/// Page::containing_address(VirtAddr::new(0)),
/// Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff)),
/// );
/// let lower_half = Page::containing_address(VirtAddr::new(0))..=Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff));
/// page_table.clean_up_addr_range(lower_half, frame_deallocator);
/// # }
/// ```
@@ -527,7 +525,7 @@ pub trait CleanUp {
/// (e.g. no reference counted page tables or reusing the same page tables for different virtual addresses ranges in the same page table).
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>;
12 changes: 6 additions & 6 deletions src/structures/paging/mapper/offset_page_table.rs
Original file line number Diff line number Diff line change
@@ -65,7 +65,7 @@ unsafe impl PageTableFrameMapping for PhysOffset {

// delegate all trait implementations to inner

impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
impl Mapper<Size1GiB> for OffsetPageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -134,7 +134,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
}
}

impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
impl Mapper<Size2MiB> for OffsetPageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -203,7 +203,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
}
}

impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
impl Mapper<Size4KiB> for OffsetPageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -272,14 +272,14 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
}
}

impl<'a> Translate for OffsetPageTable<'a> {
impl Translate for OffsetPageTable<'_> {
#[inline]
fn translate(&self, addr: VirtAddr) -> TranslateResult {
self.inner.translate(addr)
}
}

impl<'a> CleanUp for OffsetPageTable<'a> {
impl CleanUp for OffsetPageTable<'_> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
@@ -291,7 +291,7 @@ impl<'a> CleanUp for OffsetPageTable<'a> {
#[inline]
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
32 changes: 15 additions & 17 deletions src/structures/paging/mapper/recursive_page_table.rs
Original file line number Diff line number Diff line change
@@ -299,7 +299,7 @@ impl<'a> RecursivePageTable<'a> {
}
}

impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
impl Mapper<Size1GiB> for RecursivePageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -419,7 +419,7 @@ impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
}
}

impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
impl Mapper<Size2MiB> for RecursivePageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -574,7 +574,7 @@ impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
}
}

impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
impl Mapper<Size4KiB> for RecursivePageTable<'_> {
#[inline]
unsafe fn map_to_with_table_flags<A>(
&mut self,
@@ -763,7 +763,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
}
}

impl<'a> Translate for RecursivePageTable<'a> {
impl Translate for RecursivePageTable<'_> {
#[allow(clippy::inconsistent_digit_grouping)]
fn translate(&self, addr: VirtAddr) -> TranslateResult {
let page = Page::containing_address(addr);
@@ -836,26 +836,24 @@ impl<'a> Translate for RecursivePageTable<'a> {
}
}

impl<'a> CleanUp for RecursivePageTable<'a> {
impl CleanUp for RecursivePageTable<'_> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
unsafe {
self.clean_up_addr_range(
PageRangeInclusive {
start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
},
Page::from_start_address(VirtAddr::new(0)).unwrap()
..=Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
frame_deallocator,
)
}
}

unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
@@ -864,20 +862,20 @@ impl<'a> CleanUp for RecursivePageTable<'a> {
recursive_index: PageTableIndex,
page_table: &mut PageTable,
level: PageTableLevel,
range: PageRangeInclusive,
range: RangeInclusive<Page>,
frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) -> bool {
if range.is_empty() {
return false;
}

let table_addr = range
.start
.start()
.start_address()
.align_down(level.table_address_space_alignment());

let start = range.start.page_table_index(level);
let end = range.end.page_table_index(level);
let start = range.start().page_table_index(level);
let end = range.end().page_table_index(level);

if let Some(next_level) = level.next_lower_level() {
let offset_per_entry = level.entry_address_space_alignment();
@@ -898,17 +896,17 @@ impl<'a> CleanUp for RecursivePageTable<'a> {
.unwrap();
let end = start + (offset_per_entry - 1);
let start = Page::<Size4KiB>::containing_address(start);
let start = start.max(range.start);
let start = start.max(*range.start());
let end = Page::<Size4KiB>::containing_address(end);
let end = end.min(range.end);
let end = end.min(*range.end());
let page_table =
[p1_ptr, p2_ptr, p3_ptr][level as usize - 2](start, recursive_index);
let page_table = unsafe { &mut *page_table };
if clean_up(
recursive_index,
page_table,
next_level,
Page::range_inclusive(start, end),
start..=end,
frame_deallocator,
) {
entry.set_unused();
390 changes: 293 additions & 97 deletions src/structures/paging/page.rs

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions src/structures/paging/page_table.rs
Original file line number Diff line number Diff line change
@@ -353,8 +353,8 @@ impl From<PageTableIndex> for usize {
#[cfg(feature = "step_trait")]
impl Step for PageTableIndex {
#[inline]
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
end.0.checked_sub(start.0).map(usize::from)
fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
Step::steps_between(&start.0, &end.0)
}

#[inline]
5 changes: 3 additions & 2 deletions src/structures/tss.rs
Original file line number Diff line number Diff line change
@@ -5,16 +5,17 @@ use core::mem::size_of;

/// In 64-bit mode the TSS holds information that is not
/// directly related to the task-switch mechanism,
/// but is used for finding kernel level stack
/// if interrupts arrive while in kernel mode.
/// but is used for stack switching when an interrupt or exception occurs.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed(4))]
pub struct TaskStateSegment {
reserved_1: u32,
/// The full 64-bit canonical forms of the stack pointers (RSP) for privilege levels 0-2.
/// The stack pointers used when a privilege level change occurs from a lower privilege level to a higher one.
pub privilege_stack_table: [VirtAddr; 3],
reserved_2: u64,
/// The full 64-bit canonical forms of the interrupt stack table (IST) pointers.
/// The stack pointers used when an entry in the Interrupt Descriptor Table has an IST value other than 0.
pub interrupt_stack_table: [VirtAddr; 7],
reserved_3: u64,
reserved_4: u16,
2 changes: 1 addition & 1 deletion testing/src/gdt.rs
Original file line number Diff line number Diff line change
@@ -13,7 +13,7 @@ lazy_static! {
const STACK_SIZE: usize = 4096;
static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];

let stack_start = VirtAddr::from_ptr(unsafe { ptr::addr_of!(STACK) });
let stack_start = VirtAddr::from_ptr(ptr::addr_of!(STACK));
let stack_end = stack_start + STACK_SIZE as u64;
stack_end
};