//! This is an incomplete implementation of mmap/mremap/munmap which is restricted in order to be
//! implementable on top of the existing memory system. The point of these functions as written is
//! to allow memory allocators written entirely in Rust to be executed by Miri. This implementation
//! does not support other uses of mmap such as file mappings.
//!
//! mmap/mremap/munmap behave a lot like alloc/realloc/dealloc, and for simple use they are exactly
//! equivalent. But the memory-mapping API provides more control. For example:
//!
//! * It is possible to munmap a single page in the middle of a mapped region. We do not have a way
//!   to express non-contiguous allocations.
//!
//! * With MAP_FIXED it is possible to call mmap multiple times, but create a single contiguous
//!   range of mapped virtual addresses. A memory allocator can then choose to carve this up into
//!   allocations in arbitrary ways.
//!
//! For now, none of this is possible with the Miri implementation.
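//!
//! As a sketch of the one call pattern these shims do accept (hypothetical user code, not
//! part of this module):
//!
//! ```ignore
//! let ptr = unsafe {
//!     libc::mmap(
//!         std::ptr::null_mut(),                     // addr: ignored, MAP_FIXED is unsupported
//!         4096,                                     // length: rounded up to whole pages
//!         libc::PROT_READ | libc::PROT_WRITE,       // the only supported protection
//!         libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,  // the only supported flag combination
//!         -1,                                       // fd: must be -1 (no file mappings)
//!         0,                                        // offset: must be 0
//!     )
//! };
//! // ... carve up the mapping in an allocator ...
//! unsafe { libc::munmap(ptr, 4096) }; // must unmap the whole mapping at once
//! ```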

use crate::*;
use rustc_target::abi::{Align, Size};

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
    fn mmap(
        &mut self,
        addr: &OpTy<'tcx, Provenance>,
        length: &OpTy<'tcx, Provenance>,
        prot: &OpTy<'tcx, Provenance>,
        flags: &OpTy<'tcx, Provenance>,
        fd: &OpTy<'tcx, Provenance>,
        offset: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Scalar<Provenance>> {
        let this = self.eval_context_mut();

        // We do not support MAP_FIXED, so the addr argument is always ignored.
        let addr = this.read_pointer(addr)?;
        let length = this.read_scalar(length)?.to_target_usize(this)?;
        let prot = this.read_scalar(prot)?.to_i32()?;
        let flags = this.read_scalar(flags)?.to_i32()?;
        let fd = this.read_scalar(fd)?.to_i32()?;
        let offset = this.read_scalar(offset)?.to_target_usize(this)?;

        let map_private = this.eval_libc_i32("MAP_PRIVATE");
        let map_anonymous = this.eval_libc_i32("MAP_ANONYMOUS");
        let map_shared = this.eval_libc_i32("MAP_SHARED");
        let map_fixed = this.eval_libc_i32("MAP_FIXED");

        // This is a horrible hack, but since the guard page mechanism calls mmap and expects
        // a particular return value, we just give it that value.
        if this.frame_in_std() && this.tcx.sess.target.os == "macos" && (flags & map_fixed) != 0 {
            return Ok(Scalar::from_maybe_pointer(addr, this));
        }

        let prot_read = this.eval_libc_i32("PROT_READ");
        let prot_write = this.eval_libc_i32("PROT_WRITE");

        // First, we do some basic argument validation as required by mmap.
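        // Exactly one of MAP_PRIVATE and MAP_SHARED must be set, which is what the
        // `count_ones` check below verifies.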
        if (flags & (map_private | map_shared)).count_ones() != 1 {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
            // POSIX says mmap returns MAP_FAILED on failure, not null.
            return Ok(this.eval_libc("MAP_FAILED"));
        }
        if length == 0 {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
            return Ok(this.eval_libc("MAP_FAILED"));
        }

        // If a user tries to map a file, we want to loudly inform them that this is not going
        // to work. It is possible that POSIX gives us enough leeway to return an error, but the
        // outcome for the user (they need to add cfg(miri)) is the same, just more frustrating.
        if fd != -1 {
            throw_unsup_format!("Miri does not support file-backed memory mappings");
        }

        // POSIX says:
        // [ENOTSUP]
        // * MAP_FIXED or MAP_PRIVATE was specified in the flags argument and the implementation
        //   does not support this functionality.
        // * The implementation does not support the combination of accesses requested in the
        //   prot argument.
        //
        // Miri doesn't support MAP_FIXED or any protections other than PROT_READ|PROT_WRITE.
        if flags & map_fixed != 0 || prot != prot_read | prot_write {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("ENOTSUP")))?;
            return Ok(this.eval_libc("MAP_FAILED"));
        }

        // Miri does not support shared mappings, or any of the other extensions that for example
        // Linux has added to the flags argument.
        if flags != map_private | map_anonymous {
            throw_unsup_format!(
                "Miri only supports calls to mmap which set the flags argument to MAP_PRIVATE|MAP_ANONYMOUS"
            );
        }

        // This is only used for file mappings, which we don't support anyway.
        if offset != 0 {
            throw_unsup_format!("Miri does not support non-zero offsets to mmap");
        }

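        // mmap operates on whole pages, so return page-aligned memory and round the
        // requested length up to a multiple of the page size.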
        let align = Align::from_bytes(this.machine.page_size).unwrap();
        let map_length = this.machine.round_up_to_multiple_of_page_size(length).unwrap_or(u64::MAX);

        let ptr =
            this.allocate_ptr(Size::from_bytes(map_length), align, MiriMemoryKind::Mmap.into())?;
        // We just allocated this, the access is definitely in-bounds and fits into our address space.
        // mmap guarantees new mappings are zero-init.
        this.write_bytes_ptr(
            ptr.into(),
            std::iter::repeat(0u8).take(usize::try_from(map_length).unwrap()),
        )
        .unwrap();
        // Memory mappings are always exposed.
        Machine::expose_ptr(this, ptr)?;

        Ok(Scalar::from_pointer(ptr, this))
    }

    fn mremap(
        &mut self,
        old_address: &OpTy<'tcx, Provenance>,
        old_size: &OpTy<'tcx, Provenance>,
        new_size: &OpTy<'tcx, Provenance>,
        flags: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Scalar<Provenance>> {
        let this = self.eval_context_mut();

        let old_address = this.read_pointer(old_address)?;
        let old_size = this.read_scalar(old_size)?.to_target_usize(this)?;
        let new_size = this.read_scalar(new_size)?.to_target_usize(this)?;
        let flags = this.read_scalar(flags)?.to_i32()?;

        // old_address must be a multiple of the page size.
        #[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
        if old_address.addr().bytes() % this.machine.page_size != 0 || new_size == 0 {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
            return Ok(this.eval_libc("MAP_FAILED"));
        }

        if flags & this.eval_libc_i32("MREMAP_FIXED") != 0 {
            throw_unsup_format!("Miri does not support mremap with MREMAP_FIXED");
        }

        if flags & this.eval_libc_i32("MREMAP_DONTUNMAP") != 0 {
            throw_unsup_format!("Miri does not support mremap with MREMAP_DONTUNMAP");
        }

        if flags & this.eval_libc_i32("MREMAP_MAYMOVE") == 0 {
            // We only support MREMAP_MAYMOVE, so not passing the flag is just a failure.
            // Like the other error paths, return MAP_FAILED rather than null.
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
            return Ok(this.eval_libc("MAP_FAILED"));
        }

        let align = this.machine.page_align();
        let ptr = this.reallocate_ptr(
            old_address,
            Some((Size::from_bytes(old_size), align)),
            Size::from_bytes(new_size),
            align,
            MiriMemoryKind::Mmap.into(),
        )?;
        if let Some(increase) = new_size.checked_sub(old_size) {
            // We just allocated this, the access is definitely in-bounds and fits into our address space.
            // mmap guarantees new mappings are zero-init.
            this.write_bytes_ptr(
                ptr.offset(Size::from_bytes(old_size), this).unwrap().into(),
                std::iter::repeat(0u8).take(usize::try_from(increase).unwrap()),
            )
            .unwrap();
        }
        // Memory mappings are always exposed.
        Machine::expose_ptr(this, ptr)?;

        Ok(Scalar::from_pointer(ptr, this))
    }

    fn munmap(
        &mut self,
        addr: &OpTy<'tcx, Provenance>,
        length: &OpTy<'tcx, Provenance>,
    ) -> InterpResult<'tcx, Scalar<Provenance>> {
        let this = self.eval_context_mut();

        let addr = this.read_pointer(addr)?;
        let length = this.read_scalar(length)?.to_target_usize(this)?;

        // addr must be a multiple of the page size.
        #[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
        if addr.addr().bytes() % this.machine.page_size != 0 {
            this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
            return Ok(Scalar::from_i32(-1));
        }

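        // length need not be page-aligned: every page that overlaps the given range gets
        // unmapped, so round it up to whole pages.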
        let length = this.machine.round_up_to_multiple_of_page_size(length).unwrap_or(u64::MAX);

        let mut addr = addr.addr().bytes();
        let mut bytes_unmapped = 0;
        while bytes_unmapped < length {
            // munmap specifies:
            // It is not an error if the indicated range does not contain any mapped pages.
            // So we make sure that if our address is not that of an exposed allocation, we just
            // step forward to the next page.
            let ptr = Machine::ptr_from_addr_cast(this, addr)?;
            let Ok(ptr) = ptr.into_pointer_or_addr() else {
                bytes_unmapped = bytes_unmapped.checked_add(this.machine.page_size).unwrap();
                addr = addr.wrapping_add(this.machine.page_size);
                continue;
            };
            // FIXME: This should fail if the pointer is to an unexposed allocation. But it
            // doesn't.
            let Some((alloc_id, offset, _prov)) = Machine::ptr_get_alloc(this, ptr) else {
                bytes_unmapped = bytes_unmapped.checked_add(this.machine.page_size).unwrap();
                addr = addr.wrapping_add(this.machine.page_size);
                continue;
            };

            if offset != Size::ZERO {
                throw_unsup_format!("Miri does not support partial munmap");
            }
            let (_kind, alloc) = this.memory.alloc_map().get(alloc_id).unwrap();
            let this_alloc_len = alloc.len() as u64;
            bytes_unmapped = bytes_unmapped.checked_add(this_alloc_len).unwrap();
            if bytes_unmapped > length {
                throw_unsup_format!("Miri does not support partial munmap");
            }

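            // The mapping was exposed when it was created, so deallocate it through a
            // pointer with wildcard provenance.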
            this.deallocate_ptr(
                Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)),
                Some((Size::from_bytes(this_alloc_len), this.machine.page_align())),
                MemoryKind::Machine(MiriMemoryKind::Mmap),
            )?;
            addr = addr.wrapping_add(this_alloc_len);
        }

        Ok(Scalar::from_i32(0))
    }
}

trait RangeExt {
    fn overlaps(&self, other: &Self) -> bool;
}
impl RangeExt for std::ops::Range<Size> {
    fn overlaps(&self, other: &Self) -> bool {
        // These ranges are half-open, so they overlap only if the larger start lies strictly
        // below the smaller end; `<=` would also report merely-adjacent ranges as overlapping.
        self.start.max(other.start) < self.end.min(other.end)
    }
}