diff --git a/rust-version b/rust-version
index 0c16500602..c0561119a9 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-f65615f02d22b85e9205f2716ab36182d34bab2b
+70540d51275086ce1a4cb12e9d96a97134df792e
diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index 0080d0a9f6..13306b4809 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -48,7 +48,7 @@ use std::{
 
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir;
 use rustc_span::Span;
 use rustc_target::abi::{Align, Size};
diff --git a/src/concurrency/init_once.rs b/src/concurrency/init_once.rs
index 47ebf1b38e..1f57e8b2b0 100644
--- a/src/concurrency/init_once.rs
+++ b/src/concurrency/init_once.rs
@@ -1,7 +1,7 @@
 use std::collections::VecDeque;
 use std::num::NonZeroU32;
 
-use rustc_index::vec::Idx;
+use rustc_index::Idx;
 
 use super::sync::EvalContextExtPriv as _;
 use super::thread::MachineCallback;
diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index 08b13b956e..f37a2fd2cd 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -5,7 +5,7 @@ use std::ops::Not;
 use log::trace;
 
 use rustc_data_structures::fx::FxHashMap;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 
 use super::init_once::InitOnce;
 use super::vector_clock::VClock;
diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs
index 0d8d941c19..e9bbae4d50 100644
--- a/src/concurrency/thread.rs
+++ b/src/concurrency/thread.rs
@@ -10,7 +10,7 @@ use log::trace;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir::def_id::DefId;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
 use rustc_middle::mir::Mutability;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::Span;
@@ -603,10 +603,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
         // this allows us to have a deterministic scheduler.
         for thread in self.threads.indices() {
             match self.timeout_callbacks.entry(thread) {
-                Entry::Occupied(entry) =>
+                Entry::Occupied(entry) => {
                     if entry.get().call_time.get_wait_time(clock) == Duration::new(0, 0) {
                         return Some((thread, entry.remove().callback));
-                    },
+                    }
+                }
                 Entry::Vacant(_) => {}
             }
         }
diff --git a/src/concurrency/vector_clock.rs b/src/concurrency/vector_clock.rs
index ab4764dd1c..a6e67ef869 100644
--- a/src/concurrency/vector_clock.rs
+++ b/src/concurrency/vector_clock.rs
@@ -1,4 +1,4 @@
-use rustc_index::vec::Idx;
+use rustc_index::Idx;
 use rustc_span::{Span, SpanData, DUMMY_SP};
 use smallvec::SmallVec;
 use std::{
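The import hunks above all track the same upstream change: `Idx` and `IndexVec` are now re-exported from the `rustc_index` crate root instead of its `vec` module. A minimal sketch of the new import style follows; the index type and names are illustrative, not from this patch, and it assumes a crate that builds against rustc's internal `rustc_index` (as Miri does):

use rustc_index::{Idx, IndexVec};

// Hypothetical index type, for illustration only.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct DemoId(u32);

impl Idx for DemoId {
    fn new(idx: usize) -> Self {
        DemoId(u32::try_from(idx).unwrap())
    }
    fn index(self) -> usize {
        self.0 as usize
    }
}

fn demo() {
    // Only the import path changed; the types and API are the same.
    let mut names: IndexVec<DemoId, &str> = IndexVec::new();
    let id = names.push("main thread");
    assert_eq!(names[id], "main thread");
}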
diff --git a/tests/pass/tree-borrows/read-only-from-mut.rs b/tests/pass/tree-borrows/read-only-from-mut.rs
deleted file mode 100644
index 4daf06c777..0000000000
--- a/tests/pass/tree-borrows/read-only-from-mut.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-//@compile-flags: -Zmiri-tree-borrows
-
-// Tree Borrows has no issue with several mutable references existing
-// at the same time, as long as they are used only immutably.
-// I.e. multiple Reserved can coexist.
-pub fn main() {
-    unsafe {
-        let base = &mut 42u64;
-        let r1 = &mut *(base as *mut u64);
-        let r2 = &mut *(base as *mut u64);
-        let _l = *r1;
-        let _l = *r2;
-    }
-}
diff --git a/tests/pass/tree-borrows/tree-borrows.rs b/tests/pass/tree-borrows/tree-borrows.rs
new file mode 100644
index 0000000000..aa6f707889
--- /dev/null
+++ b/tests/pass/tree-borrows/tree-borrows.rs
@@ -0,0 +1,280 @@
+//@compile-flags: -Zmiri-tree-borrows
+#![feature(allocator_api)]
+
+use std::mem;
+use std::ptr;
+
+fn main() {
+    aliasing_read_only_mutable_refs();
+    string_as_mut_ptr();
+
+    // Stacked Borrows tests
+    read_does_not_invalidate1();
+    read_does_not_invalidate2();
+    mut_raw_then_mut_shr();
+    mut_shr_then_mut_raw();
+    mut_raw_mut();
+    partially_invalidate_mut();
+    drop_after_sharing();
+    direct_mut_to_const_raw();
+    two_raw();
+    shr_and_raw();
+    disjoint_mutable_subborrows();
+    raw_ref_to_part();
+    array_casts();
+    mut_below_shr();
+    wide_raw_ptr_in_tuple();
+    not_unpin_not_protected();
+}
+
+// Tree Borrows has no issue with several mutable references existing
+// at the same time, as long as they are used only immutably.
+// I.e. multiple Reserved can coexist.
+pub fn aliasing_read_only_mutable_refs() {
+    unsafe {
+        let base = &mut 42u64;
+        let r1 = &mut *(base as *mut u64);
+        let r2 = &mut *(base as *mut u64);
+        let _l = *r1;
+        let _l = *r2;
+    }
+}
+
+pub fn string_as_mut_ptr() {
+    // This errors in Stacked Borrows since as_mut_ptr restricts the provenance,
+    // but with Tree Borrows it should work.
+    unsafe {
+        let mut s = String::from("hello");
+        s.reserve(1); // make the `str` that `s` derefs to not cover the entire `s`.
+
+        // Prevent automatically dropping the String's data
+        let mut s = mem::ManuallyDrop::new(s);
+
+        let ptr = s.as_mut_ptr();
+        let len = s.len();
+        let capacity = s.capacity();
+
+        let s = String::from_raw_parts(ptr, len, capacity);
+
+        assert_eq!(String::from("hello"), s);
+    }
+}
+
+// ----- The tests below were taken from Stacked Borrows ----
+
+// Make sure that reading from an `&mut` does, like reborrowing to `&`,
+// NOT invalidate other reborrows.
+fn read_does_not_invalidate1() {
+    fn foo(x: &mut (i32, i32)) -> &i32 {
+        let xraw = x as *mut (i32, i32);
+        let ret = unsafe { &(*xraw).1 };
+        let _val = x.1; // we just read, this does NOT invalidate the reborrows.
+        ret
+    }
+    assert_eq!(*foo(&mut (1, 2)), 2);
+}
+// Same as above, but this time we first create a raw, then read from `&mut`
+// and then freeze from the raw.
+fn read_does_not_invalidate2() {
+    fn foo(x: &mut (i32, i32)) -> &i32 {
+        let xraw = x as *mut (i32, i32);
+        let _val = x.1; // we just read, this does NOT invalidate the raw reborrow.
+        let ret = unsafe { &(*xraw).1 };
+        ret
+    }
+    assert_eq!(*foo(&mut (1, 2)), 2);
+}
+
+// Escape a mut to raw, then share the same mut and use the share, then the raw.
+// That should work.
+fn mut_raw_then_mut_shr() {
+    let mut x = 2;
+    let xref = &mut x;
+    let xraw = &mut *xref as *mut _;
+    let xshr = &*xref;
+    assert_eq!(*xshr, 2);
+    unsafe {
+        *xraw = 4;
+    }
+    assert_eq!(x, 4);
+}
+
+// Creating first a shared reference and then a raw pointer from a `&mut`
+// should permit mutation through that raw pointer.
+fn mut_shr_then_mut_raw() {
+    let xref = &mut 2;
+    let _xshr = &*xref;
+    let xraw = xref as *mut _;
+    unsafe {
+        *xraw = 3;
+    }
+    assert_eq!(*xref, 3);
+}
+
+// Ensure that if we derive from a mut a raw, and then from that a mut,
+// and then read through the original mut, that does not invalidate the raw.
+// This shows that the read-exception for `&mut` applies even if the `Shr` item
+// on the stack is not at the top.
+fn mut_raw_mut() {
+    let mut x = 2;
+    {
+        let xref1 = &mut x;
+        let xraw = xref1 as *mut _;
+        let _xref2 = unsafe { &mut *xraw };
+        let _val = *xref1;
+        unsafe {
+            *xraw = 4;
+        }
+        // we can now use both xraw and xref1, for reading
+        assert_eq!(*xref1, 4);
+        assert_eq!(unsafe { *xraw }, 4);
+        assert_eq!(*xref1, 4);
+        assert_eq!(unsafe { *xraw }, 4);
+        // we cannot use xref2; see `compile-fail/stacked-borrows/illegal_read4.rs`
+    }
+    assert_eq!(x, 4);
+}
+
+fn partially_invalidate_mut() {
+    let data = &mut (0u8, 0u8);
+    let reborrow = &mut *data as *mut (u8, u8);
+    let shard = unsafe { &mut (*reborrow).0 };
+    data.1 += 1; // the deref overlaps with `shard`, but that is ok; the access does not overlap.
+    *shard += 1; // so we can still use `shard`.
+    assert_eq!(*data, (1, 1));
+}
+
+// Make sure that we can handle the situation where a location is frozen when being dropped.
+fn drop_after_sharing() {
+    let x = String::from("hello!");
+    let _len = x.len();
+}
+
+// Make sure that coercing &mut T to *const T produces a writeable pointer.
+fn direct_mut_to_const_raw() {
+    // TODO: This is currently disabled, waiting on a decision on
+    /*let x = &mut 0;
+    let y: *const i32 = x;
+    unsafe { *(y as *mut i32) = 1; }
+    assert_eq!(*x, 1);
+    */
+}
+
+// Make sure that we can create two raw pointers from a mutable reference and use them both.
+fn two_raw() {
+    unsafe {
+        let x = &mut 0;
+        let y1 = x as *mut _;
+        let y2 = x as *mut _;
+        *y1 += 2;
+        *y2 += 1;
+    }
+}
+
+// Make sure that creating a *mut does not invalidate existing shared references.
+fn shr_and_raw() {
+    unsafe {
+        let x = &mut 0;
+        let y1: &i32 = mem::transmute(&*x); // launder lifetimes
+        let y2 = x as *mut _;
+        let _val = *y1;
+        *y2 += 1;
+    }
+}
+
+fn disjoint_mutable_subborrows() {
+    struct Foo {
+        a: String,
+        b: Vec<i32>,
+    }
+
+    unsafe fn borrow_field_a<'a>(this: *mut Foo) -> &'a mut String {
+        &mut (*this).a
+    }
+
+    unsafe fn borrow_field_b<'a>(this: *mut Foo) -> &'a mut Vec<i32> {
+        &mut (*this).b
+    }
+
+    let mut foo = Foo { a: "hello".into(), b: vec![0, 1, 2] };
+
+    let ptr = &mut foo as *mut Foo;
+
+    let a = unsafe { borrow_field_a(ptr) };
+    let b = unsafe { borrow_field_b(ptr) };
+    b.push(4);
+    a.push_str(" world");
+    assert_eq!(format!("{:?} {:?}", a, b), r#""hello world" [0, 1, 2, 4]"#);
+}
+
+fn raw_ref_to_part() {
+    struct Part {
+        _lame: i32,
+    }
+
+    #[repr(C)]
+    struct Whole {
+        part: Part,
+        extra: i32,
+    }
+
+    let it = Box::new(Whole { part: Part { _lame: 0 }, extra: 42 });
+    let whole = ptr::addr_of_mut!(*Box::leak(it));
+    let part = unsafe { ptr::addr_of_mut!((*whole).part) };
+    let typed = unsafe { &mut *(part as *mut Whole) };
+    assert!(typed.extra == 42);
+    drop(unsafe { Box::from_raw(whole) });
+}
+
+/// When casting an array reference to a raw element ptr, that should cover the whole array.
+fn array_casts() {
+    let mut x: [usize; 2] = [0, 0];
+    let p = &mut x as *mut usize;
+    unsafe {
+        *p.add(1) = 1;
+    }
+
+    let x: [usize; 2] = [0, 1];
+    let p = &x as *const usize;
+    assert_eq!(unsafe { *p.add(1) }, 1);
+}
+
+/// Transmuting &&i32 to &&mut i32 is fine.
+fn mut_below_shr() {
+    let x = 0;
+    let y = &x;
+    let p = unsafe { core::mem::transmute::<&&i32, &&mut i32>(&y) };
+    let r = &**p;
+    let _val = *r;
+}
+
+fn wide_raw_ptr_in_tuple() {
+    let mut x: Box<dyn std::any::Any> = Box::new("ouch");
+    let r = &mut *x as *mut dyn std::any::Any;
+    // This triggers the visitor-based recursive retagging. It is *not* supposed to retag raw
+    // pointers, but then the visitor might recurse into the "fields" of a wide raw pointer and
+    // find a reference (to a vtable) there that it wants to retag... and that would be Wrong.
+    let pair = (r, &0);
+    let r = unsafe { &mut *pair.0 };
+    // Make sure the fn ptr part of the vtable is still fine.
+    r.type_id();
+}
+
+fn not_unpin_not_protected() {
+    // `&mut !Unpin`, at least for now, does not get `noalias` nor `dereferenceable`, so we also
+    // don't add protectors. (We could, but until we have a better idea for where we want to go with
+    // the self-referential-generator situation, it does not seem worth the potential trouble.)
+    use std::marker::PhantomPinned;
+
+    pub struct NotUnpin(i32, PhantomPinned);
+
+    fn inner(x: &mut NotUnpin, f: fn(&mut NotUnpin)) {
+        // `f` may mutate, but it may not deallocate!
+        f(x)
+    }
+
+    inner(Box::leak(Box::new(NotUnpin(0, PhantomPinned))), |x| {
+        let raw = x as *mut _;
+        drop(unsafe { Box::from_raw(raw) });
+    });
+}
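For contrast with `aliasing_read_only_mutable_refs` above, a sketch of the complementary failing case; this function is hypothetical and not part of this patch. Under Tree Borrows the two Reserved references may coexist only while used read-only; once one is written through, it becomes Active and its sibling is disabled, so the final read is flagged as UB under `-Zmiri-tree-borrows`:

// Hypothetical counterpart, not part of this patch: writing through one of the
// aliasing mutable references ends the read-only coexistence of the Reserved
// pointers, so Miri (with -Zmiri-tree-borrows) reports the last read as UB.
pub fn aliasing_mutable_refs_with_write() {
    unsafe {
        let base = &mut 42u64;
        let r1 = &mut *(base as *mut u64);
        let r2 = &mut *(base as *mut u64);
        *r1 = 13; // r1: Reserved -> Active; its sibling r2 becomes Disabled
        let _l = *r2; // UB: read through a Disabled pointer
    }
}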