diff --git a/rust-version b/rust-version
index a11d33b3d3..3b6723b944 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-a2e29d67c26bdf8f278c98ee02d6cc77a279ed2e
+12813159a985d87a98578e05cc39200e4e8c2102
diff --git a/src/data_race.rs b/src/data_race.rs
index aec22dadc1..44cce53957 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -9,6 +9,9 @@
 //! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
 //! sequences is not needed.
 //!
+//! The implementation also models races with memory allocation and deallocation by internally treating
+//! allocation and deallocation as a type of write for the purpose of detecting data-races.
+//!
 //! This does not explore weak memory orders and so can still miss data-races
 //! but should not report false-positives
 //!
@@ -73,7 +76,7 @@ use rustc_target::abi::Size;
 use crate::{
     ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
     OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
-    VectorIdx,
+    VectorIdx, MemoryKind, MiriMemoryKind
 };
 
 pub type AllocExtra = VClockAlloc;
@@ -192,6 +195,34 @@ struct AtomicMemoryCellClocks {
     sync_vector: VClock,
 }
 
+/// Type of write operation: allocating memory,
+/// non-atomic writes and deallocating memory
+/// are all treated as writes for the purpose
+/// of the data-race detector.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteType {
+    /// Allocate memory.
+    Allocate,
+
+    /// Standard unsynchronized write.
+    Write,
+
+    /// Deallocate memory.
+    /// Note that when memory is deallocated first, later non-atomic accesses
+    /// will be reported as use-after-free, not as data races.
+    /// (Same for `Allocate` above.)
+    Deallocate,
+}
+impl WriteType {
+    fn get_descriptor(self) -> &'static str {
+        match self {
+            WriteType::Allocate => "Allocate",
+            WriteType::Write => "Write",
+            WriteType::Deallocate => "Deallocate",
+        }
+    }
+}
+
 /// Memory Cell vector clock metadata
 /// for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -204,6 +235,11 @@ struct MemoryCellClocks {
     /// that performed the last write operation.
     write_index: VectorIdx,
 
+    /// The type of operation that the write index represents,
+    /// either newly allocated memory, a non-atomic write or
+    /// a deallocation of memory.
+    write_type: WriteType,
+
     /// The vector-clock of the timestamp of the last read operation
     /// performed by a thread since the last write operation occurred.
     /// It is reset to zero on each write operation.
@@ -215,20 +251,18 @@ struct MemoryCellClocks {
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
 }
 
-/// Create a default memory cell clocks instance
-/// for uninitialized memory.
-impl Default for MemoryCellClocks {
-    fn default() -> Self {
+impl MemoryCellClocks {
+    /// Create a new set of clocks representing memory allocated
+    /// at a given vector timestamp and index.
+    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
         MemoryCellClocks {
             read: VClock::default(),
-            write: 0,
-            write_index: VectorIdx::MAX_INDEX,
+            write: alloc,
+            write_index: alloc_index,
+            write_type: WriteType::Allocate,
             atomic_ops: None,
         }
     }
-}
-
-impl MemoryCellClocks {
+
     /// Load the internal atomic memory cells if they exist.
     #[inline]
@@ -382,6 +416,7 @@ impl MemoryCellClocks {
         &mut self,
         clocks: &ThreadClockSet,
         index: VectorIdx,
+        write_type: WriteType,
     ) -> Result<(), DataRace> {
         log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
@@ -393,6 +428,7 @@ impl MemoryCellClocks {
             if race_free {
                 self.write = clocks.clock[index];
                 self.write_index = index;
+                self.write_type = write_type;
                 self.read.set_zero_vector();
                 Ok(())
             } else {
@@ -638,6 +674,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             Ok(())
         }
     }
+
+    fn reset_vector_clocks(
+        &mut self,
+        ptr: Pointer<Tag>,
+        size: Size
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        if let Some(data_race) = &mut this.memory.extra.data_race {
+            if data_race.multi_threaded.get() {
+                let alloc_meta = this.memory.get_raw_mut(ptr.alloc_id)?.extra.data_race.as_mut().unwrap();
+                alloc_meta.reset_clocks(ptr.offset, size);
+            }
+        }
+        Ok(())
+    }
 }
 
 /// Vector clock metadata for a logical memory allocation.
@@ -646,22 +697,50 @@ pub struct VClockAlloc {
     /// Assigning each byte a MemoryCellClocks.
     alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
 
-    // Pointer to global state.
+    /// Pointer to global state.
     global: MemoryExtra,
 }
 
 impl VClockAlloc {
-    /// Create a new data-race allocation detector.
-    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
+    /// Create a new data-race detector for newly allocated memory.
+    pub fn new_allocation(global: &MemoryExtra, len: Size, kind: MemoryKind<MiriMemoryKind>) -> VClockAlloc {
+        let (alloc_timestamp, alloc_index) = match kind {
+            // User allocated and stack memory should track allocation.
+            MemoryKind::Machine(
+                MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
+            ) | MemoryKind::Stack => {
+                let (alloc_index, clocks) = global.current_thread_state();
+                let alloc_timestamp = clocks.clock[alloc_index];
+                (alloc_timestamp, alloc_index)
+            }
+            // Other global memory should trace races but be allocated at the 0 timestamp.
+            MemoryKind::Machine(
+                MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
+                MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
+            ) | MemoryKind::CallerLocation | MemoryKind::Vtable => {
+                (0, VectorIdx::MAX_INDEX)
+            }
+        };
         VClockAlloc {
             global: Rc::clone(global),
-            alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
+            alloc_ranges: RefCell::new(RangeMap::new(
+                len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+            )),
+        }
+    }
+
+    fn reset_clocks(&mut self, offset: Size, len: Size) {
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (_, range) in alloc_ranges.iter_mut(offset, len) {
+            // Reset the portion of the range
+            *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
         }
     }
 
     // Find an index, if one exists where the value
     // in `l` is greater than the value in `r`.
     fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
+        log::trace!("Find index where not {:?} <= {:?}", l, r);
         let l_slice = l.as_slice();
         let r_slice = r.as_slice();
         l_slice
@@ -681,7 +760,7 @@ impl VClockAlloc {
                 .enumerate()
                 .find_map(|(idx, &r)| if r == 0 { None } else { Some(idx) })
                 .expect("Invalid VClock Invariant");
-            Some(idx)
+            Some(idx + r_slice.len())
         } else {
             None
         }
@@ -712,18 +791,18 @@ impl VClockAlloc {
             // Convert the write action into the vector clock it
            // represents for diagnostic purposes.
             write_clock = VClock::new_with_index(range.write_index, range.write);
-            ("WRITE", range.write_index, &write_clock)
+            (range.write_type.get_descriptor(), range.write_index, &write_clock)
         } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
-            ("READ", idx, &range.read)
+            ("Read", idx, &range.read)
         } else if !is_atomic {
             if let Some(atomic) = range.atomic() {
                 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                 {
-                    ("ATOMIC_STORE", idx, &atomic.write_vector)
+                    ("Atomic Store", idx, &atomic.write_vector)
                 } else if let Some(idx) =
                     Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
                 {
-                    ("ATOMIC_LOAD", idx, &atomic.read_vector)
+                    ("Atomic Load", idx, &atomic.read_vector)
                 } else {
                     unreachable!(
                         "Failed to report data-race for non-atomic operation: no race found"
@@ -774,7 +853,7 @@ impl VClockAlloc {
                 return Self::report_data_race(
                     &self.global,
                     range,
-                    "READ",
+                    "Read",
                     false,
                     pointer,
                     len,
@@ -792,17 +871,17 @@ impl VClockAlloc {
         &mut self,
         pointer: Pointer<Tag>,
         len: Size,
-        action: &str,
+        write_type: WriteType,
     ) -> InterpResult<'tcx> {
         if self.global.multi_threaded.get() {
             let (index, clocks) = self.global.current_thread_state();
             for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
-                if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
+                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                     // Report data-race
                     return Self::report_data_race(
                         &self.global,
                         range,
-                        action,
+                        write_type.get_descriptor(),
                         false,
                         pointer,
                         len,
@@ -820,7 +899,7 @@ impl VClockAlloc {
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
     pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Write")
+        self.unique_access(pointer, len, WriteType::Write)
     }
 
     /// Detect data-races for an unsynchronized deallocate operation, will not perform
@@ -828,7 +907,7 @@ impl VClockAlloc {
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
    pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Deallocate")
+        self.unique_access(pointer, len, WriteType::Deallocate)
     }
 }
@@ -1134,6 +1213,8 @@ impl GlobalState {
             vector_info.push(thread)
         };
 
+        log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);
+
         // Mark the chosen vector index as in use by the thread.
         thread_info[thread].vector_index = Some(created_index);
diff --git a/src/machine.rs b/src/machine.rs
index 4b9cad8420..e639bf450a 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -478,7 +478,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
             (None, Tag::Untagged)
         };
         let race_alloc = if let Some(data_race) = &memory_extra.data_race {
-            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size))
+            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size, kind))
         } else {
             None
         };
@@ -510,6 +510,18 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         Ok(())
     }
 
+    fn after_static_mem_initialized(
+        ecx: &mut InterpCx<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::PointerTag>,
+        size: Size,
+    ) -> InterpResult<'tcx> {
+        if ecx.memory.extra.data_race.is_some() {
+            ecx.reset_vector_clocks(ptr, size)?;
+        }
+        Ok(())
+    }
+
     #[inline(always)]
     fn tag_global_base_pointer(memory_extra: &MemoryExtra, id: AllocId) -> Self::PointerTag {
         if let Some(stacked_borrows) = &memory_extra.stacked_borrows {
diff --git a/tests/compile-fail/data_race/alloc_read_race.rs b/tests/compile-fail/data_race/alloc_read_race.rs
new file mode 100644
index 0000000000..fc1e9d30e6
--- /dev/null
+++ b/tests/compile-fail/data_race/alloc_read_race.rs
@@ -0,0 +1,48 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::mem::MaybeUninit;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<MaybeUninit<usize>>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<MaybeUninit<usize>>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. alloc
+    //  2. write
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+            pointer.store(Box::into_raw(Box::new(MaybeUninit::uninit())), Ordering::Relaxed);
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+
+            // Note: could also error due to reading uninitialized memory, but the data-race detector triggers first.
+            *pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+
+        // Clean up memory, will never be executed
+        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
+    }
+}
diff --git a/tests/compile-fail/data_race/alloc_write_race.rs b/tests/compile-fail/data_race/alloc_write_race.rs
new file mode 100644
index 0000000000..d9f5af396a
--- /dev/null
+++ b/tests/compile-fail/data_race/alloc_write_race.rs
@@ -0,0 +1,50 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "C" {
+    fn malloc(size: usize) -> *mut u8;
+    fn free(ptr: *mut u8);
+}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. alloc
+    //  2. write
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+            pointer.store(malloc(std::mem::size_of::<usize>()) as *mut usize, Ordering::Relaxed);
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+
+        // Clean up memory, will never be executed
+        free(pointer.load(Ordering::Relaxed) as *mut _);
+    }
+}
diff --git a/tests/compile-fail/data_race/atomic_read_na_write_race1.rs b/tests/compile-fail/data_race/atomic_read_na_write_race1.rs
index 0b9610edc6..44860ee628 100644
--- a/tests/compile-fail/data_race/atomic_read_na_write_race1.rs
+++ b/tests/compile-fail/data_race/atomic_read_na_write_race1.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             //Equivalent to: (&*c.0).load(Ordering::SeqCst)
-            atomic_load(c.0 as *mut usize) //~ ERROR Data race
+            atomic_load(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/atomic_read_na_write_race2.rs b/tests/compile-fail/data_race/atomic_read_na_write_race2.rs
index 779babefd8..6d28e18886 100644
--- a/tests/compile-fail/data_race/atomic_read_na_write_race2.rs
+++ b/tests/compile-fail/data_race/atomic_read_na_write_race2.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             let atomic_ref = &mut *c.0;
-            *atomic_ref.get_mut() = 32; //~ ERROR Data race
+            *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/atomic_write_na_read_race1.rs b/tests/compile-fail/data_race/atomic_write_na_read_race1.rs
index 3211a5ae53..0b753f6710 100644
--- a/tests/compile-fail/data_race/atomic_write_na_read_race1.rs
+++ b/tests/compile-fail/data_race/atomic_write_na_read_race1.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             let atomic_ref = &mut *c.0;
-            *atomic_ref.get_mut() //~ ERROR Data race
+            *atomic_ref.get_mut() //~ ERROR Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/atomic_write_na_read_race2.rs b/tests/compile-fail/data_race/atomic_write_na_read_race2.rs
index 131d4e07b8..a9f5fb2fe5 100644
--- a/tests/compile-fail/data_race/atomic_write_na_read_race2.rs
+++ b/tests/compile-fail/data_race/atomic_write_na_read_race2.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             //Equivalent to: (&*c.0).store(32, Ordering::SeqCst)
-            atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race
+            atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/atomic_write_na_write_race1.rs b/tests/compile-fail/data_race/atomic_write_na_write_race1.rs
index 74adf7ae4b..d5a828fa6e 100644
--- a/tests/compile-fail/data_race/atomic_write_na_write_race1.rs
+++ b/tests/compile-fail/data_race/atomic_write_na_write_race1.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             //Equivalent to: (&*c.0).store(64, Ordering::SeqCst)
-            atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race
+            atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/atomic_write_na_write_race2.rs b/tests/compile-fail/data_race/atomic_write_na_write_race2.rs
index 75ad755fbd..9812dcd799 100644
--- a/tests/compile-fail/data_race/atomic_write_na_write_race2.rs
+++ b/tests/compile-fail/data_race/atomic_write_na_write_race2.rs
@@ -22,7 +22,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             let atomic_ref = &mut *c.0;
-            *atomic_ref.get_mut() = 32; //~ ERROR Data race
+            *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/dangling_thread_async_race.rs b/tests/compile-fail/data_race/dangling_thread_async_race.rs
index d8b5d82f83..ad539ec5b0 100644
--- a/tests/compile-fail/data_race/dangling_thread_async_race.rs
+++ b/tests/compile-fail/data_race/dangling_thread_async_race.rs
@@ -24,9 +24,9 @@ fn main() {
         })
     };
 
-    // Detatch the thread and sleep until it terminates
+    // Detach the thread and sleep until it terminates
     mem::drop(join);
-    sleep(Duration::from_millis(100));
+    sleep(Duration::from_millis(200));
 
     // Spawn and immediately join a thread
     // to execute the join code-path
@@ -36,7 +36,7 @@ fn main() {
 
     let join2 = unsafe {
         spawn(move || {
-            *c.0 = 64; //~ ERROR Data race
+            *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1)
         })
     };
 
diff --git a/tests/compile-fail/data_race/dangling_thread_race.rs b/tests/compile-fail/data_race/dangling_thread_race.rs
index 172b05bd4f..755ba8efda 100644
--- a/tests/compile-fail/data_race/dangling_thread_race.rs
+++ b/tests/compile-fail/data_race/dangling_thread_race.rs
@@ -24,9 +24,9 @@ fn main() {
         })
     };
 
-    // Detatch the thread and sleep until it terminates
+    // Detach the thread and sleep until it terminates
     mem::drop(join);
-    sleep(Duration::from_millis(100));
+    sleep(Duration::from_millis(200));
 
     // Spawn and immediately join a thread
     // to execute the join code-path
@@ -36,6 +36,6 @@ fn main() {
 
     unsafe {
-        *c.0 = 64; //~ ERROR Data race
+        *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1)
     }
 }
diff --git a/tests/compile-fail/data_race/dealloc_read_race1.rs b/tests/compile-fail/data_race/dealloc_read_race1.rs
new file mode 100644
index 0000000000..14b02e95cc
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_read_race1.rs
@@ -0,0 +1,32 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "Rust" {
+    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
+    let ptr = EvilSend(pointer);
+
+    unsafe {
+        let j1 = spawn(move || {
+            *ptr.0
+        });
+
+        let j2 = spawn(move || {
+            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>()); //~ ERROR Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_read_race2.rs b/tests/compile-fail/data_race/dealloc_read_race2.rs
new file mode 100644
index 0000000000..a4bf210ef4
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_read_race2.rs
@@ -0,0 +1,34 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "Rust" {
+    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
+    let ptr = EvilSend(pointer);
+
+    unsafe {
+        let j1 = spawn(move || {
+            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>())
+        });
+
+        let j2 = spawn(move || {
+            // Also an error of the form: Data race detected between Read on Thread(id = 2) and Deallocate on Thread(id = 1)
+            // but the invalid allocation is detected first.
+            *ptr.0 //~ ERROR dereferenced after this allocation got freed
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_read_race_stack.rs b/tests/compile-fail/data_race/dealloc_read_race_stack.rs
new file mode 100644
index 0000000000..31960fb216
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_read_race_stack.rs
@@ -0,0 +1,52 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. read
+    //  3. stack-deallocate
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+            {
+                let mut stack_var = 0usize;
+
+                pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+                sleep(Duration::from_millis(200));
+
+            } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_read_race_stack_drop.rs b/tests/compile-fail/data_race/dealloc_read_race_stack_drop.rs
new file mode 100644
index 0000000000..44950a34db
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_read_race_stack_drop.rs
@@ -0,0 +1,52 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. read
+    //  3. stack-deallocate
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+
+            let mut stack_var = 0usize;
+
+            pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+            sleep(Duration::from_millis(200));
+
+            drop(stack_var);
+        }); //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2)
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_write_race1.rs b/tests/compile-fail/data_race/dealloc_write_race1.rs
new file mode 100644
index 0000000000..edcdfffdb5
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_write_race1.rs
@@ -0,0 +1,31 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "Rust" {
+    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+}
+pub fn main() {
+    // Shared atomic pointer
+    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
+    let ptr = EvilSend(pointer);
+
+    unsafe {
+        let j1 = spawn(move || {
+            *ptr.0 = 2;
+        });
+
+        let j2 = spawn(move || {
+            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>()); //~ ERROR Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_write_race2.rs b/tests/compile-fail/data_race/dealloc_write_race2.rs
new file mode 100644
index 0000000000..20c05fa8f1
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_write_race2.rs
@@ -0,0 +1,33 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "Rust" {
+    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+}
+pub fn main() {
+    // Shared atomic pointer
+    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
+    let ptr = EvilSend(pointer);
+
+    unsafe {
+        let j1 = spawn(move || {
+            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>());
+        });
+
+        let j2 = spawn(move || {
+            // Also an error of the form: Data race detected between Write on Thread(id = 2) and Deallocate on Thread(id = 1)
+            // but the invalid allocation is detected first.
+            *ptr.0 = 2; //~ ERROR dereferenced after this allocation got freed
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_write_race_stack.rs b/tests/compile-fail/data_race/dealloc_write_race_stack.rs
new file mode 100644
index 0000000000..25dea65fe7
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_write_race_stack.rs
@@ -0,0 +1,52 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. read
+    //  3. stack-deallocate
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+            {
+                let mut stack_var = 0usize;
+
+                pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+                sleep(Duration::from_millis(200));
+
+            } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2)
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire) = 3;
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/dealloc_write_race_stack_drop.rs b/tests/compile-fail/data_race/dealloc_write_race_stack_drop.rs
new file mode 100644
index 0000000000..1d239e9eb7
--- /dev/null
+++ b/tests/compile-fail/data_race/dealloc_write_race_stack_drop.rs
@@ -0,0 +1,53 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. read
+    //  3. stack-deallocate
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+
+            let mut stack_var = 0usize;
+
+            pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+            sleep(Duration::from_millis(200));
+
+            // Note: Implicit read for drop(_) races with write, would detect race with deallocate after.
+            drop(stack_var); //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire) = 3;
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/enable_after_join_to_main.rs b/tests/compile-fail/data_race/enable_after_join_to_main.rs
index c294317771..832158a34a 100644
--- a/tests/compile-fail/data_race/enable_after_join_to_main.rs
+++ b/tests/compile-fail/data_race/enable_after_join_to_main.rs
@@ -29,7 +29,7 @@ pub fn main() {
         });
 
         let j2 = spawn(move || {
-            *c.0 = 64; //~ ERROR Data race
+            *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/read_write_race.rs b/tests/compile-fail/data_race/read_write_race.rs
index 42fd7a51ff..0df66d66ad 100644
--- a/tests/compile-fail/data_race/read_write_race.rs
+++ b/tests/compile-fail/data_race/read_write_race.rs
@@ -18,7 +18,7 @@ pub fn main() {
         });
 
         let j2 = spawn(move || {
-            *c.0 = 64; //~ ERROR Data race
+            *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/read_write_race_stack.rs b/tests/compile-fail/data_race/read_write_race_stack.rs
new file mode 100644
index 0000000000..0cf915cdef
--- /dev/null
+++ b/tests/compile-fail/data_race/read_write_race_stack.rs
@@ -0,0 +1,57 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation -Zmir-opt-level=0
+
+// Note: mir-opt-level set to 0 to prevent the read of stack_var in thread 1
+// from being optimized away and preventing the detection of the data-race.
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. atomic_store
+    //  3. atomic_load
+    //  4. write-value
+    //  5. read-value
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+
+            let mut stack_var = 0usize;
+
+            pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+            sleep(Duration::from_millis(200));
+
+            stack_var //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2)
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire) = 3;
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/compile-fail/data_race/relax_acquire_race.rs b/tests/compile-fail/data_race/relax_acquire_race.rs
index 2ae0aacbcf..8b8616431f 100644
--- a/tests/compile-fail/data_race/relax_acquire_race.rs
+++ b/tests/compile-fail/data_race/relax_acquire_race.rs
@@ -37,7 +37,7 @@ pub fn main() {
 
         let j3 = spawn(move || {
             if SYNC.load(Ordering::Acquire) == 2 {
-                *c.0 //~ ERROR Data race
+                *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
             } else {
                 0
             }
diff --git a/tests/compile-fail/data_race/release_seq_race.rs b/tests/compile-fail/data_race/release_seq_race.rs
index 59263cb712..29c428b388 100644
--- a/tests/compile-fail/data_race/release_seq_race.rs
+++ b/tests/compile-fail/data_race/release_seq_race.rs
@@ -30,7 +30,7 @@ pub fn main() {
         let j1 = spawn(move || {
             *c.0 = 1;
             SYNC.store(1, Ordering::Release);
-            sleep(Duration::from_millis(100));
+            sleep(Duration::from_millis(200));
             SYNC.store(3, Ordering::Relaxed);
         });
 
@@ -40,9 +40,9 @@ pub fn main() {
         });
 
         let j3 = spawn(move || {
-            sleep(Duration::from_millis(1000));
+            sleep(Duration::from_millis(500));
             if SYNC.load(Ordering::Acquire) == 3 {
-                *c.0 //~ ERROR Data race
+                *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
             } else {
                 0
             }
diff --git a/tests/compile-fail/data_race/release_seq_race_same_thread.rs b/tests/compile-fail/data_race/release_seq_race_same_thread.rs
index d64b80dbd8..54b9f49937 100644
--- a/tests/compile-fail/data_race/release_seq_race_same_thread.rs
+++ b/tests/compile-fail/data_race/release_seq_race_same_thread.rs
@@ -38,7 +38,7 @@ pub fn main() {
 
         let j2 = spawn(move || {
             if SYNC.load(Ordering::Acquire) == 2 {
-                *c.0 //~ ERROR Data race
+                *c.0 //~ ERROR Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1)
             } else {
                 0
             }
diff --git a/tests/compile-fail/data_race/rmw_race.rs b/tests/compile-fail/data_race/rmw_race.rs
index e523f8b374..fcf683a65d 100644
--- a/tests/compile-fail/data_race/rmw_race.rs
+++ b/tests/compile-fail/data_race/rmw_race.rs
@@ -38,7 +38,7 @@ pub fn main() {
 
         let j3 = spawn(move || {
             if SYNC.load(Ordering::Acquire) == 3 {
-                *c.0 //~ ERROR Data race
+                *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1)
             } else {
                 0
             }
diff --git a/tests/compile-fail/data_race/write_write_race.rs b/tests/compile-fail/data_race/write_write_race.rs
index aca19a46c1..61909eda86 100644
--- a/tests/compile-fail/data_race/write_write_race.rs
+++ b/tests/compile-fail/data_race/write_write_race.rs
@@ -18,7 +18,7 @@ pub fn main() {
         });
 
         let j2 = spawn(move || {
-            *c.0 = 64; //~ ERROR Data race
+            *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1)
         });
 
         j1.join().unwrap();
diff --git a/tests/compile-fail/data_race/write_write_race_stack.rs b/tests/compile-fail/data_race/write_write_race_stack.rs
new file mode 100644
index 0000000000..aa1428f8a7
--- /dev/null
+++ b/tests/compile-fail/data_race/write_write_race_stack.rs
@@ -0,0 +1,57 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. atomic_store
+    //  3. atomic_load
+    //  4. write-value
+    //  5. write-value
+    unsafe {
+        let j1 = spawn(move || {
+            // Concurrent allocate the memory.
+            // Uses relaxed semantics to not generate
+            // a release sequence.
+            let pointer = &*ptr.0;
+
+            let mut stack_var = 0usize;
+
+            pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+            sleep(Duration::from_millis(200));
+
+            stack_var = 1usize; //~ ERROR Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2)
+
+            // read to silence errors
+            stack_var
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire) = 3;
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
diff --git a/tests/run-pass/concurrency/concurrent_caller_location.rs b/tests/run-pass/concurrency/concurrent_caller_location.rs
new file mode 100644
index 0000000000..d509d1b3f7
--- /dev/null
+++ b/tests/run-pass/concurrency/concurrent_caller_location.rs
@@ -0,0 +1,19 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+use std::panic::Location;
+
+fn initialize() {
+    let _ignore = initialize_inner();
+}
+
+fn initialize_inner() -> &'static Location<'static> {
+    Location::caller()
+}
+
+fn main() {
+    let j1 = spawn(initialize);
+    let j2 = spawn(initialize);
+    j1.join().unwrap();
+    j2.join().unwrap();
+}
diff --git a/tests/run-pass/concurrency/concurrent_caller_location.stderr b/tests/run-pass/concurrency/concurrent_caller_location.stderr
new file mode 100644
index 0000000000..03676519d4
--- /dev/null
+++ b/tests/run-pass/concurrency/concurrent_caller_location.stderr
@@ -0,0 +1,2 @@
+warning: thread support is experimental and incomplete: weak memory effects are not emulated.
+
diff --git a/tests/run-pass/concurrency/issue1643.rs b/tests/run-pass/concurrency/issue1643.rs
new file mode 100644
index 0000000000..1238a1bd6f
--- /dev/null
+++ b/tests/run-pass/concurrency/issue1643.rs
@@ -0,0 +1,16 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+fn initialize() {
+    initialize_inner(&mut || false)
+}
+
+fn initialize_inner(_init: &mut dyn FnMut() -> bool) {}
+
+fn main() {
+    let j1 = spawn(initialize);
+    let j2 = spawn(initialize);
+    j1.join().unwrap();
+    j2.join().unwrap();
+}
diff --git a/tests/run-pass/concurrency/issue1643.stderr b/tests/run-pass/concurrency/issue1643.stderr
new file mode 100644
index 0000000000..03676519d4
--- /dev/null
+++ b/tests/run-pass/concurrency/issue1643.stderr
@@ -0,0 +1,2 @@
+warning: thread support is experimental and incomplete: weak memory effects are not emulated.
+
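
The idea behind the detector change above can be summarized in a small standalone model: allocation and deallocation participate in the same happens-before check as ordinary writes, so an access that has not synchronized with the allocating (or deallocating) thread is reported as a race against an "Allocate" (or "Deallocate") write. The sketch below is illustrative only, with a flat Vec<u64> standing in for Miri's VClock and hypothetical names (Cell, ThreadClock, read_races) that are not part of Miri's API:

// Simplified model of the per-byte state tracked after this change: the
// last write-like operation may be an allocation, a plain write, or a
// deallocation, and an unsynchronized later access races with it.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum WriteType {
    Allocate,
    Write,
    Deallocate,
}

// Per-byte metadata: which thread performed the last write-like operation,
// at what timestamp, and what kind of operation it was.
struct Cell {
    write_thread: usize,
    write_timestamp: u64,
    write_type: WriteType,
}

// A thread's vector clock: clock[i] is the latest timestamp of thread i
// that this thread has synchronized with.
struct ThreadClock {
    clock: Vec<u64>,
}

// A read races with the last write-like operation (including Allocate and
// Deallocate) unless the reader has already observed its timestamp.
fn read_races(cell: &Cell, reader: &ThreadClock) -> Option<WriteType> {
    let observed = reader.clock.get(cell.write_thread).copied().unwrap_or(0);
    if observed < cell.write_timestamp {
        Some(cell.write_type) // e.g. "Read racing with Allocate"
    } else {
        None
    }
}

fn main() {
    // Thread 1 allocated at timestamp 5.
    let cell = Cell { write_thread: 1, write_timestamp: 5, write_type: WriteType::Allocate };
    // A reader that has only seen thread 1 up to timestamp 3 races...
    let behind = ThreadClock { clock: vec![0, 3] };
    assert_eq!(read_races(&cell, &behind), Some(WriteType::Allocate));
    // ...while a reader that acquired thread 1's clock does not.
    let synced = ThreadClock { clock: vec![0, 5] };
    assert_eq!(read_races(&cell, &synced), None);
}

This is why the new tests can race a plain read or write in one thread against malloc, Box::into_raw, a stack allocation, __rust_dealloc, or the end of a stack variable's scope in another: all of those now update the same write clock that ordinary stores update.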