
add clean_up and clean_up_with_filter #264

Merged: 20 commits, Sep 5, 2021

7 changes: 7 additions & 0 deletions src/addr.rs
@@ -3,6 +3,7 @@
use core::fmt;
use core::ops::{Add, AddAssign, Sub, SubAssign};

use crate::structures::paging::page_table::PageTableLevel;
use crate::structures::paging::{PageOffset, PageTableIndex};
use bit_field::BitField;

@@ -198,6 +199,12 @@ impl VirtAddr {
pub const fn p4_index(self) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
}

/// Returns the 9-bit page table index for the given page table level.
#[inline]
pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
}
}

impl fmt::Debug for VirtAddr {
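
The new `page_table_index` generalizes the fixed-level helpers `p1_index` through `p4_index`: it shifts out the 12 page-offset bits plus 9 bits for every level below the requested one, and `PageTableIndex::new_truncate` keeps only the low 9 bits. A minimal sketch of the equivalence, assuming a crate version that includes this change:

```rust
use x86_64::structures::paging::page_table::PageTableLevel;
use x86_64::VirtAddr;

fn main() {
    // Lies in the lower canonical half, so VirtAddr::new does not panic.
    let addr = VirtAddr::new(0x1234_5678_9abc);

    // Level 4 shifts out 12 + 3 * 9 = 39 bits, exactly what p4_index does.
    assert_eq!(
        usize::from(addr.page_table_index(PageTableLevel::Four)),
        usize::from(addr.p4_index()),
    );
    // Level 1 shifts out only the 12-bit page offset, matching p1_index.
    assert_eq!(
        usize::from(addr.page_table_index(PageTableLevel::One)),
        usize::from(addr.p1_index()),
    );
}
```
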
1 change: 1 addition & 0 deletions src/instructions/tlb.rs
@@ -72,6 +72,7 @@ impl Pcid {
/// Invalidate the given address in the TLB using the `invpcid` instruction.
///
/// ## Safety
///
/// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
#[inline]
pub unsafe fn flush_pcid(command: InvPicdCommand) {
4 changes: 4 additions & 0 deletions src/registers/control.rs
@@ -301,6 +301,7 @@ mod x86_64 {
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
#[inline]
@@ -311,6 +312,7 @@
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
/// [`Cr4Flags::PCID`] must be set before calling this method.
@@ -322,6 +324,7 @@
/// Write a new P4 table address into the CR3 register.
///
/// ## Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
#[inline]
@@ -400,6 +403,7 @@ mod x86_64 {
/// Updates CR4 flags.
///
/// Preserves the value of reserved fields.
///
/// ## Safety
///
/// This function is unsafe because it's possible to violate memory
1 change: 1 addition & 0 deletions src/structures/idt.rs
@@ -789,6 +789,7 @@ impl EntryOptions {
/// This function panics if the index is not in the range 0..7.
///
/// ## Safety
///
/// This function is unsafe because the caller must ensure that the passed stack index is
/// valid and not used by other interrupts. Otherwise, memory safety violations are possible.
#[inline]
90 changes: 87 additions & 3 deletions src/structures/paging/mapper/mapped_page_table.rs
@@ -1,9 +1,9 @@
 use crate::structures::paging::{
     frame::PhysFrame,
-    frame_alloc::FrameAllocator,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
     mapper::*,
-    page::{AddressNotAligned, Page, Size1GiB, Size2MiB, Size4KiB},
-    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
+    page::{AddressNotAligned, Page, PageRangeInclusive, Size1GiB, Size2MiB, Size4KiB},
+    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags, PageTableLevel},
 };
 
 /// A Mapper implementation that relies on a PhysAddr to VirtAddr conversion function.
@@ -584,6 +584,90 @@ impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
}
}

impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
self.clean_up_addr_range(
PageRangeInclusive {
start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
},
frame_deallocator,
)
}

unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
{
unsafe fn clean_up<P: PageTableFrameMapping>(
page_table: &mut PageTable,
page_table_walker: &PageTableWalker<P>,
level: PageTableLevel,
range: PageRangeInclusive,
frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) -> bool {
if range.is_empty() {
return false;
}

let table_addr = range
.start
.start_address()
.align_down(level.table_address_space_alignment());

let start = range.start.page_table_index(level);
let end = range.end.page_table_index(level);

if let Some(next_level) = level.next_lower_level() {
let offset_per_entry = level.entry_address_space_alignment();
for (i, entry) in page_table
.iter_mut()
.enumerate()
.take(usize::from(end) + 1)
.skip(usize::from(start))
{
if let Ok(page_table) = page_table_walker.next_table_mut(entry) {
let start = table_addr + (offset_per_entry * (i as u64));
let end = start + (offset_per_entry - 1);
let start = Page::<Size4KiB>::containing_address(start);
let start = start.max(range.start);
let end = Page::<Size4KiB>::containing_address(end);
let end = end.min(range.end);
if clean_up(
page_table,
page_table_walker,
next_level,
Page::range_inclusive(start, end),
frame_deallocator,
) {
let frame = entry.frame().unwrap();
entry.set_unused();
frame_deallocator.deallocate_frame(frame);
}
}
}
}

page_table.iter().all(PageTableEntry::is_unused)
}

clean_up(
self.level_4_table,
&self.page_table_walker,
PageTableLevel::Four,
range,
frame_deallocator,
);
}
}

#[derive(Debug)]
struct PageTableWalker<P: PageTableFrameMapping> {
page_table_frame_mapping: P,
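
In the recursive `clean_up` helper above, the loop visits only the entries whose address range intersects `range`: entry `i` covers `offset_per_entry` bytes starting at `table_addr + i * offset_per_entry`, and the first and last visited entries are clamped against the requested range before recursing. When the recursive call reports that a child table ended up completely unused, the entry is cleared and the child's frame is handed to the deallocator; the final `iter().all(is_unused)` propagates the same emptiness check one level up. A standalone sketch of the clamping for a P3 table, where each entry covers 1 GiB (`entry_subrange` is a made-up name for illustration):

```rust
/// 1 GiB, the address space covered by one P3 entry
/// (`entry_address_space_alignment` for level 3).
const GIB: u64 = 1 << 30;

/// Hypothetical helper mirroring the clamping in `clean_up_addr_range`:
/// returns the sub-range of `range` covered by P3 entry `i`, or `None`
/// if the entry lies entirely outside the range.
fn entry_subrange(table_addr: u64, i: u64, range: (u64, u64)) -> Option<(u64, u64)> {
    let start = table_addr + i * GIB; // first address mapped via entry i
    let end = start + (GIB - 1);      // last address mapped via entry i
    let start = start.max(range.0);   // clamp the first visited entry
    let end = end.min(range.1);       // clamp the last visited entry
    (start <= end).then(|| (start, end))
}

fn main() {
    // Cleaning 1 GiB..3 GiB touches P3 entries 1 and 2, each in full.
    let range = (GIB, 3 * GIB - 1);
    assert_eq!(entry_subrange(0, 0, range), None);
    assert_eq!(entry_subrange(0, 1, range), Some((GIB, 2 * GIB - 1)));
    assert_eq!(entry_subrange(0, 2, range), Some((2 * GIB, 3 * GIB - 1)));
}
```
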
48 changes: 46 additions & 2 deletions src/structures/paging/mapper/mod.rs
@@ -7,8 +7,10 @@ pub use self::offset_page_table::OffsetPageTable;
pub use self::recursive_page_table::{InvalidPageTable, RecursivePageTable};

 use crate::structures::paging::{
-    frame_alloc::FrameAllocator, page_table::PageTableFlags, Page, PageSize, PhysFrame, Size1GiB,
-    Size2MiB, Size4KiB,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
+    page::PageRangeInclusive,
+    page_table::PageTableFlags,
+    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::{PhysAddr, VirtAddr};

@@ -480,3 +482,45 @@ pub enum TranslateError {
}

static _ASSERT_OBJECT_SAFE: Option<&(dyn Translate + Sync)> = None;

/// Provides methods for cleaning up unused entries.
pub trait CleanUp {
/// Remove all empty P1-P3 tables.
///
/// ## Safety
///
/// The caller has to guarantee that it's safe to free page table frames:
/// All page table frames must only be used once and only in this page table
/// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>;

/// Remove all empty P1-P3 tables in a certain range.
///
/// ```
/// # use core::ops::RangeInclusive;
/// # use x86_64::{VirtAddr, structures::paging::{
/// # FrameDeallocator, Size4KiB, MappedPageTable, mapper::{RecursivePageTable, CleanUp}, page::{Page, PageRangeInclusive},
/// # }};
/// # unsafe fn test(page_table: &mut RecursivePageTable, frame_deallocator: &mut impl FrameDeallocator<Size4KiB>) {
/// // clean up all page tables in the lower half of the address space
/// let lower_half = Page::range_inclusive(
/// Page::containing_address(VirtAddr::new(0)),
/// Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff)),
/// );
/// page_table.clean_up_addr_range(lower_half, frame_deallocator);
/// # }
/// ```
///
/// ## Safety
///
/// The caller has to guarantee that it's safe to free page table frames:
/// All page table frames must only be used once and only in this page table
/// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>;
}
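
The doctest above shows the ranged variant; the whole-address-space variant is just as short. A minimal sketch, assuming a `RecursivePageTable` and a frame deallocator are already set up (`free_empty_tables` is an illustrative wrapper, not crate API):

```rust
use x86_64::structures::paging::{
    mapper::{CleanUp, RecursivePageTable},
    FrameDeallocator, Size4KiB,
};

/// Hypothetical wrapper: free every P1-P3 table that contains no mappings.
///
/// Safety: the caller must uphold the `CleanUp::clean_up` contract, i.e.
/// no page table frame is shared, reference counted, or reused elsewhere.
unsafe fn free_empty_tables(
    page_table: &mut RecursivePageTable,
    frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) {
    unsafe { page_table.clean_up(frame_deallocator) }
}
```
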
24 changes: 23 additions & 1 deletion src/structures/paging/mapper/offset_page_table.rs
@@ -1,7 +1,8 @@
 #![cfg(target_pointer_width = "64")]
 
 use crate::structures::paging::{
-    frame::PhysFrame, mapper::*, page_table::PageTable, Page, PageTableFlags,
+    frame::PhysFrame, mapper::*, page::PageRangeInclusive, page_table::PageTable, FrameDeallocator,
+    Page, PageTableFlags,
 };

/// A Mapper implementation that requires that the complete physical memory is mapped at some
@@ -264,3 +265,24 @@ impl<'a> Translate for OffsetPageTable<'a> {
self.inner.translate(addr)
}
}

impl<'a> CleanUp for OffsetPageTable<'a> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
self.inner.clean_up(frame_deallocator)
}

#[inline]
unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
{
self.inner.clean_up_addr_range(range, frame_deallocator)
}
}
97 changes: 94 additions & 3 deletions src/structures/paging/mapper/recursive_page_table.rs
@@ -4,12 +4,12 @@ use core::fmt;

 use super::*;
 use crate::registers::control::Cr3;
-use crate::structures::paging::PageTableIndex;
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::{
     frame_alloc::FrameAllocator,
-    page::{AddressNotAligned, NotGiantPageSize},
+    page::{AddressNotAligned, NotGiantPageSize, PageRangeInclusive},
     page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
-    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
+    FrameDeallocator, Page, PageSize, PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::VirtAddr;

@@ -829,6 +829,97 @@ impl<'a> Translate for RecursivePageTable<'a> {
}
}

impl<'a> CleanUp for RecursivePageTable<'a> {
#[inline]
unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
where
D: FrameDeallocator<Size4KiB>,
{
self.clean_up_addr_range(
PageRangeInclusive {
start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
},
frame_deallocator,
)
}

unsafe fn clean_up_addr_range<D>(
&mut self,
range: PageRangeInclusive,
frame_deallocator: &mut D,
) where
D: FrameDeallocator<Size4KiB>,
{
fn clean_up(
recursive_index: PageTableIndex,
page_table: &mut PageTable,
level: PageTableLevel,
range: PageRangeInclusive,
frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
) -> bool {
if range.is_empty() {
return false;
}

let table_addr = range
.start
.start_address()
.align_down(level.table_address_space_alignment());

let start = range.start.page_table_index(level);
let end = range.end.page_table_index(level);

if let Some(next_level) = level.next_lower_level() {
let offset_per_entry = level.entry_address_space_alignment();
for (i, entry) in page_table
.iter_mut()
.enumerate()
.take(usize::from(end) + 1)
.skip(usize::from(start))
.filter(|(i, _)| {
!(level == PageTableLevel::Four && *i == recursive_index.into())
})
{
if let Ok(frame) = entry.frame() {
let start = table_addr + (offset_per_entry * (i as u64));
let end = start + (offset_per_entry - 1);
let start = Page::<Size4KiB>::containing_address(start);
let start = start.max(range.start);
let end = Page::<Size4KiB>::containing_address(end);
let end = end.min(range.end);
let page_table =
[p1_ptr, p2_ptr, p3_ptr][level as usize - 2](start, recursive_index);
let page_table = unsafe { &mut *page_table };
if clean_up(
recursive_index,
page_table,
next_level,
Page::range_inclusive(start, end),
frame_deallocator,
) {
entry.set_unused();
unsafe {
frame_deallocator.deallocate_frame(frame);
}
}
}
}
}

page_table.iter().all(PageTableEntry::is_unused)
}

clean_up(
self.recursive_index,
self.level_4_table(),
PageTableLevel::Four,
range,
frame_deallocator,
);
}
}

/// The given page table was not suitable to create a `RecursivePageTable`.
#[derive(Debug)]
pub enum InvalidPageTable {
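
The `RecursivePageTable` implementation above differs from the `MappedPageTable` one in two ways: the level-4 loop skips the entry at `recursive_index`, because that entry maps the page-table hierarchy itself and freeing it would destroy the tables mid-walk, and child tables are reached through recursive-mapping addresses computed by this file's `p1_ptr`/`p2_ptr`/`p3_ptr` helpers rather than through a physical-to-virtual mapping. A sketch of the index substitution behind such recursive-mapping lookups, assuming 4-level paging with canonical sign extension (illustrative arithmetic, not the crate's helpers):

```rust
/// Virtual address of the P3 table serving P4 entry `p4_index`, under a
/// recursive mapping with recursive entry `r`: the hardware walk resolves
/// the index sequence (r, r, r, p4_index) to the P3 table's frame.
fn p3_table_vaddr(r: u64, p4_index: u64) -> u64 {
    let addr = (r << 39) | (r << 30) | (r << 21) | (p4_index << 12);
    // Sign-extend bit 47 so the address is canonical.
    if addr & (1 << 47) != 0 {
        addr | 0xffff_0000_0000_0000
    } else {
        addr
    }
}

fn main() {
    // With the common recursive index 511, the P3 table for P4 entry 0
    // sits at 0xffff_ffff_ffe0_0000.
    assert_eq!(p3_table_vaddr(511, 0), 0xffff_ffff_ffe0_0000);
}
```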