diff --git a/rust-version b/rust-version
index 28dca558cf..937df90718 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-fdaf594bab31eec75fb6d582cd33e5a5b43de7f4
+4894123d21ed4b153a2e5c32c0870cb2d97f9b46
diff --git a/src/intptrcast.rs b/src/intptrcast.rs
index b1e89f3819..e08166b8c2 100644
--- a/src/intptrcast.rs
+++ b/src/intptrcast.rs
@@ -47,7 +47,7 @@ impl<'mir, 'tcx> GlobalState {
         }
 
         let global_state = memory.extra.intptrcast.borrow();
-        
+
         Ok(match global_state.int_to_ptr_map.binary_search_by_key(&int, |(addr, _)| *addr) {
             Ok(pos) => {
                 let (_, alloc_id) = global_state.int_to_ptr_map[pos];
@@ -55,7 +55,7 @@ impl<'mir, 'tcx> GlobalState {
                 // zero. The pointer is untagged because it was created from a cast
                 Pointer::new_with_tag(alloc_id, Size::from_bytes(0), Tag::Untagged)
             },
-            Err(0) => throw_unsup!(DanglingPointerDeref), 
+            Err(0) => throw_unsup!(DanglingPointerDeref),
             Err(pos) => {
                 // This is the largest of the adresses smaller than `int`,
                 // i.e. the greatest lower bound (glb)
@@ -63,12 +63,12 @@ impl<'mir, 'tcx> GlobalState {
                 // This never overflows because `int >= glb`
                 let offset = int - glb;
                 // If the offset exceeds the size of the allocation, this access is illegal
-                if offset <= memory.get(alloc_id)?.bytes.len() as u64 {
+                if offset <= memory.get(alloc_id)?.size.bytes() {
                     // This pointer is untagged because it was created from a cast
                     Pointer::new_with_tag(alloc_id, Size::from_bytes(offset), Tag::Untagged)
                 } else {
                     throw_unsup!(DanglingPointerDeref)
-                } 
+                }
             }
         })
     }
@@ -108,7 +108,7 @@ impl<'mir, 'tcx> GlobalState {
                 global_state.next_base_addr = base_addr.checked_add(max(size.bytes(), 1)).unwrap();
                 // Given that `next_base_addr` increases in each allocation, pushing the
                 // corresponding tuple keeps `int_to_ptr_map` sorted
-                global_state.int_to_ptr_map.push((base_addr, ptr.alloc_id)); 
+                global_state.int_to_ptr_map.push((base_addr, ptr.alloc_id));
 
                 base_addr
             }
diff --git a/src/machine.rs b/src/machine.rs
index 3853a45fd9..95cb31d67b 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -248,7 +248,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
             None => tcx.item_name(def_id).as_str(),
         };
 
-        let alloc = match link_name.get() {
+        let alloc = match &*link_name {
             "__cxa_thread_atexit_impl" => {
                 // This should be all-zero, pointer-sized.
                 let size = tcx.data_layout.pointer_size;
@@ -280,40 +280,25 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> {
         } else {
             let (stacks, base_tag) = Stacks::new_allocation(
                 id,
-                Size::from_bytes(alloc.bytes.len() as u64),
+                alloc.size,
                 Rc::clone(&memory_extra.stacked_borrows),
                 kind,
             );
             (Some(stacks), base_tag)
         };
-        if kind != MiriMemoryKind::Static.into() {
-            assert!(alloc.relocations.is_empty(), "Only statics can come initialized with inner pointers");
-            // Now we can rely on the inner pointers being static, too.
-        }
         let mut stacked_borrows = memory_extra.stacked_borrows.borrow_mut();
-        let alloc: Allocation = Allocation {
-            bytes: alloc.bytes,
-            relocations: Relocations::from_presorted(
-                alloc.relocations.iter()
-                    // The allocations in the relocations (pointers stored *inside* this allocation)
-                    // all get the base pointer tag.
-                    .map(|&(offset, ((), alloc))| {
-                        let tag = if !memory_extra.validate {
-                            Tag::Untagged
-                        } else {
-                            stacked_borrows.static_base_ptr(alloc)
-                        };
-                        (offset, (tag, alloc))
-                    })
-                    .collect()
-            ),
-            undef_mask: alloc.undef_mask,
-            align: alloc.align,
-            mutability: alloc.mutability,
-            extra: AllocExtra {
+        let alloc: Allocation = alloc.retag(
+            |alloc| if !memory_extra.validate {
+                Tag::Untagged
+            } else {
+                // Only statics may already contain pointers at this point
+                assert_eq!(kind, MiriMemoryKind::Static.into());
+                stacked_borrows.static_base_ptr(alloc)
+            },
+            AllocExtra {
                 stacked_borrows: stacks,
             },
-        };
+        );
         (Cow::Owned(alloc), base_tag)
     }
 
diff --git a/src/shims/foreign_items.rs b/src/shims/foreign_items.rs
index 0b2fa64996..90c18265fc 100644
--- a/src/shims/foreign_items.rs
+++ b/src/shims/foreign_items.rs
@@ -138,7 +138,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             None => this.tcx.item_name(def_id).as_str(),
         };
         // Strip linker suffixes (seen on 32-bit macOS).
-        let link_name = link_name.get().trim_end_matches("$UNIX2003");
+        let link_name = link_name.trim_end_matches("$UNIX2003");
         let tcx = &{this.tcx.tcx};
 
         // First: functions that diverge.
diff --git a/src/shims/intrinsics.rs b/src/shims/intrinsics.rs
index 8656962761..06af6db76a 100644
--- a/src/shims/intrinsics.rs
+++ b/src/shims/intrinsics.rs
@@ -28,8 +28,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // (as opposed to through a place), we have to remember to erase any tag
         // that might still hang around!
 
-        let intrinsic_name = this.tcx.item_name(instance.def_id()).as_str();
-        match intrinsic_name.get() {
+        let intrinsic_name = &*this.tcx.item_name(instance.def_id()).as_str();
+        match intrinsic_name {
             "arith_offset" => {
                 let offset = this.read_scalar(args[1])?.to_isize(this)?;
                 let ptr = this.read_scalar(args[0])?.not_undef()?;
@@ -228,7 +228,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
                 // FIXME: Using host floats.
                 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
-                let f = match intrinsic_name.get() {
+                let f = match intrinsic_name {
                     "sinf32" => f.sin(),
                     "fabsf32" => f.abs(),
                     "cosf32" => f.cos(),
@@ -251,7 +251,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
                 // FIXME: Using host floats.
                 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
-                let f = match intrinsic_name.get() {
+                let f = match intrinsic_name {
                     "sinf64" => f.sin(),
                     "fabsf64" => f.abs(),
                     "cosf64" => f.cos(),
@@ -273,7 +273,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                 let a = this.read_immediate(args[0])?;
                 let b = this.read_immediate(args[1])?;
-                let op = match intrinsic_name.get() {
+                let op = match intrinsic_name {
                     "fadd_fast" => mir::BinOp::Add,
                     "fsub_fast" => mir::BinOp::Sub,
                     "fmul_fast" => mir::BinOp::Mul,
@@ -287,7 +287,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "minnumf32" | "maxnumf32" => {
                 let a = this.read_scalar(args[0])?.to_f32()?;
                 let b = this.read_scalar(args[1])?.to_f32()?;
-                let res = if intrinsic_name.get().starts_with("min") {
+                let res = if intrinsic_name.starts_with("min") {
                     a.min(b)
                 } else {
                     a.max(b)
@@ -298,7 +298,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "minnumf64" | "maxnumf64" => {
                 let a = this.read_scalar(args[0])?.to_f64()?;
                 let b = this.read_scalar(args[1])?.to_f64()?;
-                let res = if intrinsic_name.get().starts_with("min") {
+                let res = if intrinsic_name.starts_with("min") {
                     a.min(b)
                 } else {
                     a.max(b)
@@ -509,7 +509,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
             "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
                 let l = this.read_immediate(args[0])?;
                 let r = this.read_immediate(args[1])?;
-                let op = match intrinsic_name.get() {
+                let op = match intrinsic_name {
                     "unchecked_add" => mir::BinOp::Add,
                     "unchecked_sub" => mir::BinOp::Sub,
                     "unchecked_mul" => mir::BinOp::Mul,
@@ -517,7 +517,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                 };
                 let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
                 if overflowed {
-                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name.get());
+                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
                 }
                 this.write_scalar(res, dest)?;
             }
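
For reference, the int-to-pointer lookup touched in src/intptrcast.rs keeps `int_to_ptr_map` as a Vec of (base address, allocation id) pairs sorted by base address: casting an integer back to a pointer does a binary search, takes the greatest base address not above the integer, and checks the resulting offset against that allocation's size. Below is a minimal standalone sketch of that pattern; `IntToPtrMap`, `resolve`, and `alloc_size` are illustrative names only, not Miri's API, and `alloc_size` stands in for `memory.get(alloc_id)?.size.bytes()` from the diff.

    // Illustrative sketch only (hypothetical names), mirroring the lookup logic
    // in the intptrcast.rs hunks above: map an integer address back to an
    // (allocation, offset) pair, or report it as dangling.
    type AllocId = usize;

    struct IntToPtrMap {
        // Sorted by base address; stays sorted because base addresses are
        // handed out in increasing order (see the `push` in the diff).
        entries: Vec<(u64, AllocId)>,
    }

    impl IntToPtrMap {
        // `alloc_size` is a stand-in for looking up the allocation's size.
        fn resolve(&self, int: u64, alloc_size: impl Fn(AllocId) -> u64) -> Option<(AllocId, u64)> {
            match self.entries.binary_search_by_key(&int, |&(addr, _)| addr) {
                // `int` is exactly some allocation's base address: offset 0.
                Ok(pos) => Some((self.entries[pos].1, 0)),
                // No allocation starts at or below `int`: dangling.
                Err(0) => None,
                // Otherwise take the greatest base address below `int` (the glb)
                // and check the offset against that allocation's size.
                Err(pos) => {
                    let (glb, alloc_id) = self.entries[pos - 1];
                    let offset = int - glb;
                    if offset <= alloc_size(alloc_id) {
                        Some((alloc_id, offset))
                    } else {
                        None
                    }
                }
            }
        }
    }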