From 2d49a54a7d270a63b22d17c89dcf8ee6c1dc681f Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Mon, 7 Dec 2020 15:19:47 +0100 Subject: [PATCH] feat(shim-sev): add mremap syscall Signed-off-by: Harald Hoyer --- internal/shim-sev/src/syscall.rs | 241 ++++++++++++++++++++++++++++++- internal/syscall/src/lib.rs | 3 + internal/syscall/src/memory.rs | 26 +++- tests/bin/memory_stress_test.rs | 15 +- 4 files changed, 277 insertions(+), 8 deletions(-) diff --git a/internal/shim-sev/src/syscall.rs b/internal/shim-sev/src/syscall.rs index 5efa026c..35dc41ac 100644 --- a/internal/shim-sev/src/syscall.rs +++ b/internal/shim-sev/src/syscall.rs @@ -3,7 +3,7 @@ //! syscall interface layer between assembler and rust use crate::addr::{HostVirtAddr, ShimPhysUnencryptedAddr}; -use crate::allocator::ALLOCATOR; +use crate::allocator::{AllocateError, ALLOCATOR}; use crate::asm::_enarx_asm_triple_fault; use crate::attestation::SEV_SECRET; use crate::eprintln; @@ -23,7 +23,10 @@ use syscall::{ use untrusted::{AddressValidator, UntrustedRef, UntrustedRefMut, Validate, ValidateSlice}; use x86_64::instructions::tlb::flush_all; use x86_64::registers::{rdfsbase, rdgsbase, wrfsbase, wrgsbase}; -use x86_64::structures::paging::{Page, PageTableFlags, Size4KiB}; +use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult}; +use x86_64::structures::paging::{ + Mapper, Page, PageTableFlags, Size1GiB, Size2MiB, Size4KiB, Translate, +}; use x86_64::{align_up, VirtAddr}; #[repr(C)] @@ -296,8 +299,6 @@ impl MemorySyscallHandler for Handler { self.trace("mprotect", 3); let addr = addr.as_ptr(); - use x86_64::structures::paging::mapper::Mapper; - let mut flags = PageTableFlags::PRESENT | PageTableFlags::USER_ACCESSIBLE; if prot & libc::PROT_WRITE != 0 { @@ -396,6 +397,238 @@ impl MemorySyscallHandler for Handler { } } + fn mremap( + &mut self, + old_addr: UntrustedRefMut, + old_size: libc::size_t, + new_size: libc::size_t, + flags: libc::c_int, + _new_addr: UntrustedRef, + ) -> sallyport::Result { + self.trace("mremap", 5); + + if old_size >= new_size { + return Err(libc::EINVAL); + } + + let mut target_addr = old_addr + .validate_slice(old_size, self) + .ok_or(libc::EINVAL)?; + + target_addr = unsafe { + if (flags & libc::MREMAP_FIXED) != libc::MREMAP_FIXED + && (flags & libc::MREMAP_MAYMOVE) == libc::MREMAP_MAYMOVE + { + let add_addr = target_addr.as_ptr().add(old_size); + let mut mapper = SHIM_PAGETABLE.write(); + let mut allocator = ALLOCATOR.write(); + + match allocator.allocate_and_map_memory( + mapper.deref_mut(), + VirtAddr::from_ptr(add_addr), + new_size - old_size, + PageTableFlags::PRESENT + | PageTableFlags::USER_ACCESSIBLE + | PageTableFlags::WRITABLE, + PageTableFlags::PRESENT + | PageTableFlags::WRITABLE + | PageTableFlags::USER_ACCESSIBLE, + ) { + Err(AllocateError::NotAligned) => Err(libc::EINVAL), + Err(AllocateError::OutOfMemory) => Err(libc::ENOMEM), + Err(AllocateError::ZeroSize) => Err(libc::EINVAL), + Err(AllocateError::PageAlreadyMapped) + | Err(AllocateError::ParentEntryHugePage) => { + // remap the whole thing + let mut new_target_addr = *NEXT_MMAP_RWLOCK.read().deref(); + + while mapper.translate_addr(new_target_addr).is_some() { + new_target_addr += Page::::SIZE; + } + + let mut to_map = old_size as u64; + let mut old_addr = VirtAddr::from_ptr(target_addr.as_ptr()); + + let mut new_addr = new_target_addr; + + loop { + let mapped = match mapper.translate(old_addr) { + TranslateResult::Mapped { + frame: MappedFrame::Size4KiB(frame), + offset: 0, + .. 
+ } => { + mapper + .map_to( + Page::::containing_address(new_addr), + frame, + PageTableFlags::PRESENT + | PageTableFlags::USER_ACCESSIBLE + | PageTableFlags::WRITABLE, + allocator.deref_mut(), + ) + .map_err(|e| match e { + MapToError::FrameAllocationFailed => libc::ENOMEM, + MapToError::ParentEntryHugePage => libc::EINVAL, + MapToError::PageAlreadyMapped(_) => libc::EINVAL, + })? + .flush(); + Page::::SIZE + } + TranslateResult::Mapped { + frame: MappedFrame::Size2MiB(frame), + offset: 0, + .. + } => { + mapper + .map_to( + Page::::containing_address(new_addr), + frame, + PageTableFlags::PRESENT + | PageTableFlags::USER_ACCESSIBLE + | PageTableFlags::WRITABLE, + allocator.deref_mut(), + ) + .map_err(|e| match e { + MapToError::FrameAllocationFailed => libc::ENOMEM, + MapToError::ParentEntryHugePage => libc::EINVAL, + MapToError::PageAlreadyMapped(_) => libc::EINVAL, + })? + .flush(); + Page::::SIZE + } + TranslateResult::Mapped { + frame: MappedFrame::Size1GiB(frame), + offset: 0, + .. + } => { + mapper + .map_to( + Page::::containing_address(new_addr), + frame, + PageTableFlags::PRESENT + | PageTableFlags::USER_ACCESSIBLE + | PageTableFlags::WRITABLE, + allocator.deref_mut(), + ) + .map_err(|e| match e { + MapToError::FrameAllocationFailed => libc::ENOMEM, + MapToError::ParentEntryHugePage => libc::EINVAL, + MapToError::PageAlreadyMapped(_) => libc::EINVAL, + })? + .flush(); + Page::::SIZE + } + _ => return Err(libc::EINVAL), + }; + + old_addr += mapped; + new_addr += mapped; + + if to_map <= mapped { + break; + } + + to_map -= mapped; + } + + let _p = allocator + .allocate_and_map_memory( + mapper.deref_mut(), + new_addr, + new_size - old_size, + PageTableFlags::PRESENT + | PageTableFlags::USER_ACCESSIBLE + | PageTableFlags::WRITABLE, + PageTableFlags::PRESENT + | PageTableFlags::WRITABLE + | PageTableFlags::USER_ACCESSIBLE, + ) + .map_err(|e| match e { + AllocateError::NotAligned => libc::EINVAL, + AllocateError::OutOfMemory => libc::ENOMEM, + AllocateError::ZeroSize => libc::EINVAL, + AllocateError::PageAlreadyMapped => libc::EINVAL, + AllocateError::ParentEntryHugePage => libc::EINVAL, + })?; + + let mut old_addr = VirtAddr::from_ptr(target_addr.as_ptr()); + let mut to_map = old_size as u64; + + loop { + let mapped = match mapper.translate(old_addr) { + TranslateResult::Mapped { + frame: MappedFrame::Size4KiB(_), + offset: 0, + .. + } => { + let (_, flush) = mapper + .unmap(Page::::containing_address(old_addr)) + .map_err( + |_| libc::EINVAL, // FIXME + )?; + flush.flush(); + Page::::SIZE + } + TranslateResult::Mapped { + frame: MappedFrame::Size2MiB(_), + offset: 0, + .. + } => { + let (_, flush) = mapper + .unmap(Page::::containing_address(old_addr)) + .map_err( + |_| libc::EINVAL, // FIXME + )?; + flush.flush(); + Page::::SIZE + } + TranslateResult::Mapped { + frame: MappedFrame::Size1GiB(_), + offset: 0, + .. + } => { + let (_, flush) = mapper + .unmap(Page::::containing_address(old_addr)) + .map_err( + |_| libc::EINVAL, // FIXME + )?; + flush.flush(); + Page::::SIZE + } + _ => return Err(libc::EINVAL), + }; + + old_addr += mapped; + + if to_map <= mapped { + break; + } + + to_map -= mapped; + } + + let len_aligned = + align_up((new_size - old_size) as _, Page::::SIZE); + *NEXT_MMAP_RWLOCK.write().deref_mut() = new_target_addr + len_aligned; + + new_target_addr + .as_mut_ptr::() + .as_mut() + .ok_or(libc::EINVAL) + .map(|v| core::slice::from_raw_parts_mut(v, new_size)) + } + Ok(_p) => Ok(target_addr), + }? 
+ } else { + return Err(libc::EINVAL); + } + }; + + eprintln!("SC> mremap() = {:?}", target_addr.as_ptr()); + Ok([target_addr.into(), Default::default()]) + } + fn munmap(&mut self, addr: UntrustedRef, length: usize) -> sallyport::Result { self.trace("munmap", 2); diff --git a/internal/syscall/src/lib.rs b/internal/syscall/src/lib.rs index 22d9e640..67d500e9 100644 --- a/internal/syscall/src/lib.rs +++ b/internal/syscall/src/lib.rs @@ -92,6 +92,9 @@ pub trait SyscallHandler: usize::from(e) as _, f.into(), ), + libc::SYS_mremap => { + self.mremap(a.into(), b.into(), c.into(), usize::from(d) as _, e.into()) + } libc::SYS_munmap => self.munmap(a.into(), b.into()), libc::SYS_madvise => self.madvise(a.into(), b.into(), usize::from(c) as _), libc::SYS_mprotect => self.mprotect(a.into(), b.into(), usize::from(c) as _), diff --git a/internal/syscall/src/memory.rs b/internal/syscall/src/memory.rs index c0593786..c413f72f 100644 --- a/internal/syscall/src/memory.rs +++ b/internal/syscall/src/memory.rs @@ -2,11 +2,12 @@ //! memory syscalls +use crate::BaseSyscallHandler; use sallyport::Result; -use untrusted::UntrustedRef; +use untrusted::{UntrustedRef, UntrustedRefMut}; /// memory syscalls -pub trait MemorySyscallHandler { +pub trait MemorySyscallHandler: BaseSyscallHandler { /// syscall fn brk(&mut self, addr: *const u8) -> Result; @@ -34,4 +35,25 @@ pub trait MemorySyscallHandler { /// syscall fn mprotect(&mut self, addr: UntrustedRef, len: libc::size_t, prot: libc::c_int) -> Result; + + /// syscall + fn mremap( + &mut self, + old_addr: UntrustedRefMut, + old_size: libc::size_t, + new_size: libc::size_t, + flags: libc::c_int, + new_addr: UntrustedRef, + ) -> Result { + self.unknown_syscall( + old_addr.as_ptr().into(), + old_size.into(), + (flags as usize).into(), + new_size.into(), + new_addr.as_ptr().into(), + 0.into(), + libc::SYS_mremap as _, + ); + Err(libc::ENOSYS) + } } diff --git a/tests/bin/memory_stress_test.rs b/tests/bin/memory_stress_test.rs index 27f719a1..a9656a14 100644 --- a/tests/bin/memory_stress_test.rs +++ b/tests/bin/memory_stress_test.rs @@ -3,13 +3,14 @@ const SIZE_32M: usize = 1024 * 1024 * 32; fn main() { let mut ret = 0; let mut size: usize = 1; + let mut vec = Vec::new(); while size < SIZE_32M { - let mut vec = Vec::with_capacity(size); + vec.resize(size, 0); vec.push(0u8); ret += vec.pop().unwrap(); size *= 2; - drop(vec); } + drop(vec); for _i in 0..100 { let mut vec = Vec::with_capacity(size); @@ -26,5 +27,15 @@ fn main() { drop(vec); } + size = 1; + let mut vec = Vec::new(); + while size < SIZE_32M { + vec.resize(size, 0); + vec.push(0u8); + ret += vec.pop().unwrap(); + size *= 2; + } + drop(vec); + std::process::exit(ret as _); }
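
For reference, a minimal guest-side sketch of how the new code path gets exercised: growing an anonymous mapping with MREMAP_MAYMOVE and without MREMAP_FIXED, which is the only combination the handler above accepts. This is illustrative only and not part of the patch; it assumes the libc crate as a dependency, in the same spirit as the Vec::resize growth added to tests/bin/memory_stress_test.rs.

fn main() {
    const OLD: usize = 4096 * 4;
    const NEW: usize = 4096 * 64;

    unsafe {
        // Anonymous, writable mapping of OLD bytes.
        let old = libc::mmap(
            core::ptr::null_mut(),
            OLD,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(old, libc::MAP_FAILED);

        // Grow the region; the shim is allowed to move it (MREMAP_MAYMOVE),
        // which is the remap-the-whole-thing branch in the handler.
        let new = libc::mremap(old, OLD, NEW, libc::MREMAP_MAYMOVE);
        assert_ne!(new, libc::MAP_FAILED);

        // Touch the newly available tail to verify it is actually mapped.
        core::ptr::write_bytes(new.cast::<u8>().add(OLD), 0xA5, NEW - OLD);

        libc::munmap(new, NEW);
    }
}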