diff --git a/src/alloc_addresses/mod.rs b/src/alloc_addresses/mod.rs
index a13b14ca90..50e5526824 100644
--- a/src/alloc_addresses/mod.rs
+++ b/src/alloc_addresses/mod.rs
@@ -453,7 +453,7 @@ impl<'tcx> MiriMachine<'tcx> {
         let thread = self.threads.active_thread();
         global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
             if let Some(data_race) = &self.data_race {
-                data_race.release_clock(&self.threads).clone()
+                data_race.release_clock(&self.threads, |clock| clock.clone())
             } else {
                 VClock::default()
             }
diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index 82c4f6d300..797b3191d8 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -828,15 +828,14 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
         }
     }
 
-    /// Returns the `release` clock of the current thread.
+    /// Calls the callback with the "release" clock of the current thread.
     /// Other threads can acquire this clock in the future to establish synchronization
     /// with this program point.
-    fn release_clock<'a>(&'a self) -> Option<Ref<'a, VClock>>
-    where
-        'tcx: 'a,
-    {
+    ///
+    /// The closure will only be invoked if data race handling is on.
+    fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
         let this = self.eval_context_ref();
-        Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads))
+        Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback))
     }
 
     /// Acquire the given clock into the current thread, establishing synchronization with
@@ -1728,7 +1727,7 @@ impl GlobalState {
         let current_index = self.active_thread_index(thread_mgr);
 
         // Store the termination clock.
-        let termination_clock = self.release_clock(thread_mgr).clone();
+        let termination_clock = self.release_clock(thread_mgr, |clock| clock.clone());
         self.thread_info.get_mut()[current_thread].termination_vector_clock =
             Some(termination_clock);
 
@@ -1778,21 +1777,23 @@ impl GlobalState {
         clocks.clock.join(clock);
     }
 
-    /// Returns the `release` clock of the current thread.
+    /// Calls the given closure with the "release" clock of the current thread.
     /// Other threads can acquire this clock in the future to establish synchronization
     /// with this program point.
-    pub fn release_clock<'tcx>(&self, threads: &ThreadManager<'tcx>) -> Ref<'_, VClock> {
+    pub fn release_clock<'tcx, R>(
+        &self,
+        threads: &ThreadManager<'tcx>,
+        callback: impl FnOnce(&VClock) -> R,
+    ) -> R {
         let thread = threads.active_thread();
         let span = threads.active_thread_ref().current_span();
-        // We increment the clock each time this happens, to ensure no two releases
-        // can be confused with each other.
         let (index, mut clocks) = self.thread_state_mut(thread);
+        let r = callback(&clocks.clock);
+        // Increment the clock, so that all following events cannot be confused with anything that
+        // occurred before the release. Crucially, the callback is invoked on the *old* clock!
         clocks.increment_clock(index, span);
-        drop(clocks);
-        // To return a read-only view, we need to release the RefCell
-        // and borrow it again.
-        let (_index, clocks) = self.thread_state(thread);
-        Ref::map(clocks, |c| &c.clock)
+
+        r
     }
 
     fn thread_index(&self, thread: ThreadId) -> VectorIdx {
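The heart of the change is in `GlobalState::release_clock`: the callback now runs on the clock *before* `increment_clock`, and a single `RefCell` borrow covers both steps, so the old drop-and-reborrow dance needed to hand out a read-only `Ref` disappears. A minimal standalone sketch of the pattern, with `VClock` simplified to a plain `Vec<u64>` and all names hypothetical:

```rust
use std::cell::RefCell;

// Simplified stand-in for Miri's per-thread clock state; `VClock` is reduced
// to a `Vec<u64>` with one timestamp slot per thread.
struct State {
    current: usize,            // index of the active thread
    clocks: RefCell<Vec<u64>>, // that thread's vector clock
}

impl State {
    // Callback-based release: the closure observes the clock *before* the
    // increment, and one `borrow_mut` covers both steps.
    fn release_clock<R>(&self, callback: impl FnOnce(&[u64]) -> R) -> R {
        let mut clock = self.clocks.borrow_mut();
        let r = callback(&clock);
        // Increment afterwards, so events following the release cannot be
        // confused with anything that happened before it.
        clock[self.current] += 1;
        r
    }
}

fn main() {
    let state = State { current: 0, clocks: RefCell::new(vec![0, 0]) };
    let snapshot = state.release_clock(|clock| clock.to_vec());
    assert_eq!(snapshot, vec![0, 0]); // the pre-increment clock
    assert_eq!(*state.clocks.borrow(), vec![1, 0]); // incremented after the callback
}
```

The generic return type `R` lets call sites either clone the clock (as in `alloc_addresses` above) or merge it into an existing clock without an intermediate allocation.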
diff --git a/src/concurrency/init_once.rs b/src/concurrency/init_once.rs
index 7a9b12bbe8..488edc91df 100644
--- a/src/concurrency/init_once.rs
+++ b/src/concurrency/init_once.rs
@@ -93,7 +93,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+            data_race
+                .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
         }
 
         // Wake up everyone.
@@ -119,7 +120,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+            data_race
+                .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
         }
 
         // Wake up one waiting thread, so they can go ahead and try to init this.
diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index 5627ccdbbe..e1e748e794 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -444,7 +444,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             // The mutex is completely unlocked. Try transferring ownership
             // to another thread.
             if let Some(data_race) = &this.machine.data_race {
-                mutex.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+                data_race.release_clock(&this.machine.threads, |clock| {
+                    mutex.clock.clone_from(clock)
+                });
             }
             if let Some(thread) = this.machine.sync.mutexes[id].queue.pop_front() {
                 this.unblock_thread(thread, BlockReason::Mutex(id))?;
@@ -553,7 +555,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         }
         if let Some(data_race) = &this.machine.data_race {
             // Add this to the shared-release clock of all concurrent readers.
-            rwlock.clock_current_readers.join(&data_race.release_clock(&this.machine.threads));
+            data_race.release_clock(&this.machine.threads, |clock| {
+                rwlock.clock_current_readers.join(clock)
+            });
         }
 
         // The thread was a reader. If the lock is not held any more, give it to a writer.
@@ -632,7 +636,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
         // Record release clock for next lock holder.
         if let Some(data_race) = &this.machine.data_race {
-            rwlock.clock_unlocked.clone_from(&*data_race.release_clock(&this.machine.threads));
+            data_race.release_clock(&this.machine.threads, |clock| {
+                rwlock.clock_unlocked.clone_from(clock)
+            });
         }
 
         // The thread was a writer.
         //
@@ -764,7 +770,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         // Each condvar signal happens-before the end of the condvar wake
        if let Some(data_race) = data_race {
-            condvar.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
+            data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
        }
        let Some(waiter) = condvar.waiters.pop_front() else {
            return interp_ok(false);
        };
@@ -837,7 +843,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         // Each futex-wake happens-before the end of the futex wait
         if let Some(data_race) = data_race {
-            futex.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
+            data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
         }
 
         // Wake up the first thread in the queue that matches any of the bits in the bitset.
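All the sync-object call sites above follow one of two patterns: `clone_from` when the object stores exactly one releasing thread's clock (mutex, rwlock writer, condvar, futex), and `join` when the clock has to accumulate several releases (the shared-release clock of concurrent rwlock readers). A rough sketch of what `join` computes, again on clocks simplified to `Vec<u64>` (Miri's real `VClock::join` also tracks the spans used in diagnostics):

```rust
// Element-wise maximum of two vector clocks; a simplified stand-in for
// `VClock::join` on plain `Vec<u64>` clocks.
fn join(target: &mut Vec<u64>, other: &[u64]) {
    if target.len() < other.len() {
        target.resize(other.len(), 0);
    }
    for (t, &o) in target.iter_mut().zip(other) {
        *t = (*t).max(o);
    }
}

fn main() {
    // Two readers release at different points; the rwlock accumulates both.
    let mut clock_current_readers = vec![3, 0, 0];
    join(&mut clock_current_readers, &[0, 5, 0]); // reader on thread 1 unlocks
    join(&mut clock_current_readers, &[0, 0, 2]); // reader on thread 2 unlocks
    assert_eq!(clock_current_readers, vec![3, 5, 2]);
    // A writer that later acquires this clock synchronizes with all past readers.
}
```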
diff --git a/src/shims/unix/linux/epoll.rs b/src/shims/unix/linux/epoll.rs
index b57347abff..81a6739728 100644
--- a/src/shims/unix/linux/epoll.rs
+++ b/src/shims/unix/linux/epoll.rs
@@ -568,9 +568,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let epoll = epfd.downcast::<Epoll>().unwrap();
 
         // Synchronize running thread to the epoll ready list.
-        if let Some(clock) = &this.release_clock() {
+        this.release_clock(|clock| {
             epoll.ready_list.clock.borrow_mut().join(clock);
-        }
+        });
 
         if let Some(thread_id) = epoll.thread_id.borrow_mut().pop() {
             waiter.push(thread_id);
diff --git a/src/shims/unix/linux/eventfd.rs b/src/shims/unix/linux/eventfd.rs
index 910ab7e90f..35bc933885 100644
--- a/src/shims/unix/linux/eventfd.rs
+++ b/src/shims/unix/linux/eventfd.rs
@@ -140,9 +140,9 @@ impl FileDescription for Event {
         match self.counter.get().checked_add(num) {
             Some(new_count @ 0..=MAX_COUNTER) => {
                 // Future `read` calls will synchronize with this write, so update the FD clock.
-                if let Some(clock) = &ecx.release_clock() {
+                ecx.release_clock(|clock| {
                     self.clock.borrow_mut().join(clock);
-                }
+                });
                 self.counter.set(new_count);
             }
             None | Some(u64::MAX) =>
diff --git a/src/shims/unix/unnamed_socket.rs b/src/shims/unix/unnamed_socket.rs
index faa54c6a75..e83c0afb28 100644
--- a/src/shims/unix/unnamed_socket.rs
+++ b/src/shims/unix/unnamed_socket.rs
@@ -234,9 +234,9 @@ impl FileDescription for AnonSocket {
             }
         }
         // Remember this clock so `read` can synchronize with us.
-        if let Some(clock) = &ecx.release_clock() {
+        ecx.release_clock(|clock| {
             writebuf.clock.join(clock);
-        }
+        });
         // Do full write / partial write based on the space available.
         let actual_write_size = len.min(available_space);
         let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
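These three shims (epoll ready list, eventfd counter, socketpair write buffer) are where the old API was actively harmful: `release_clock` incremented the clock first and then returned a `Ref` to the already-incremented clock, so the clock stored in the FD also covered events the writing thread performed *after* the release. A sketch of the difference, with hypothetical `release_old`/`release_new` helpers on simplified `Vec<u64>` clocks:

```rust
// Old behavior (reconstruction of the removed code): increment first, then
// hand out the clock, so the snapshot covers events that follow the release.
fn release_old(clock: &mut Vec<u64>, me: usize) -> Vec<u64> {
    clock[me] += 1;
    clock.clone()
}

// New behavior: snapshot first, then increment, so later events on the
// releasing thread stay outside the released clock.
fn release_new(clock: &mut Vec<u64>, me: usize) -> Vec<u64> {
    let snapshot = clock.clone();
    clock[me] += 1;
    snapshot
}

fn main() {
    // Thread 1 releases at the socket write, then writes VAL.
    let mut t1 = vec![0, 5];
    let released = release_old(&mut t1, 1); // returns [0, 6]
    let val_write = t1[1]; // VAL is written at timestamp 6
    assert!(released[1] >= val_write); // acquiring covers the VAL write: race missed

    let mut t1 = vec![0, 5];
    let released = release_new(&mut t1, 1); // returns [0, 5]
    let val_write = t1[1]; // VAL is written at timestamp 6
    assert!(released[1] < val_write); // VAL write not covered: race reported
}
```

That off-by-one is exactly what the regression test below exercises.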
diff --git a/tests/fail-dep/libc/socketpair-data-race.rs b/tests/fail-dep/libc/socketpair-data-race.rs
new file mode 100644
index 0000000000..f4c009456d
--- /dev/null
+++ b/tests/fail-dep/libc/socketpair-data-race.rs
@@ -0,0 +1,31 @@
+//! This is a regression test for : we had some
+//! faulty logic around `release_clock` that led to this code not reporting a data race.
+//@ignore-target: windows # no libc socketpair on Windows
+//@compile-flags: -Zmiri-preemption-rate=0
+use std::thread;
+
+fn main() {
+    static mut VAL: u8 = 0;
+    let mut fds = [-1, -1];
+    let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
+    assert_eq!(res, 0);
+    let thread1 = thread::spawn(move || {
+        let data = "a".as_bytes().as_ptr();
+        let res = unsafe { libc::write(fds[0], data as *const libc::c_void, 1) };
+        assert_eq!(res, 1);
+        // The write to VAL is *after* the write to the socket, so there's no proper synchronization.
+        unsafe { VAL = 1 };
+    });
+    thread::yield_now();
+
+    let mut buf: [u8; 1] = [0; 1];
+    let res: i32 = unsafe {
+        libc::read(fds[1], buf.as_mut_ptr().cast(), buf.len() as libc::size_t).try_into().unwrap()
+    };
+    assert_eq!(res, 1);
+    assert_eq!(buf, "a".as_bytes());
+
+    unsafe { assert_eq!({ VAL }, 1) }; //~ERROR: Data race
+
+    thread1.join().unwrap();
+}
diff --git a/tests/fail-dep/libc/socketpair-data-race.stderr b/tests/fail-dep/libc/socketpair-data-race.stderr
new file mode 100644
index 0000000000..6472c33727
--- /dev/null
+++ b/tests/fail-dep/libc/socketpair-data-race.stderr
@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
+  --> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
+   |
+LL |     unsafe { assert_eq!({ VAL }, 1) };
+   |                           ^^^ Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
+   |
+help: and (1) occurred earlier here
+  --> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
+   |
+LL |         unsafe { VAL = 1 };
+   |                  ^^^^^^^
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE (of the first span):
+   = note: inside `main` at tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
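To see how the fixed ordering makes the test fail as intended, here is a hypothetical end-to-end model of its timeline on the same simplified clocks: the spawned thread releases at `libc::write` and then writes `VAL`; `main` acquires at `libc::read` and then reads `VAL`:

```rust
// Hypothetical model of the test's happens-before structure on simplified
// `Vec<u64>` clocks (index 0 = main, index 1 = the spawned thread).
fn join(target: &mut Vec<u64>, other: &[u64]) {
    for (t, &o) in target.iter_mut().zip(other) {
        *t = (*t).max(o);
    }
}

fn main() {
    let mut t1 = vec![0, 3];         // spawned thread's clock
    let mut main_clock = vec![7, 0]; // main thread's clock

    // libc::write on the spawned thread: snapshot, *then* increment.
    let writebuf_clock = t1.clone();
    t1[1] += 1;
    let val_write_ts = t1[1]; // `VAL = 1` happens at timestamp 4

    // libc::read on main: acquire the buffer's clock.
    join(&mut main_clock, &writebuf_clock);

    // The read of VAL races unless main's clock covers the VAL write.
    assert!(main_clock[1] < val_write_ts); // not covered: Miri reports the race
}
```

With the old post-increment release, `writebuf_clock` would have been `[0, 4]`, the final assertion would fail, and the race above would have gone unreported.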