From ec964482216ac75d89ade1d683557d1100e0865e Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Wed, 2 Jan 2019 01:49:13 +0100
Subject: [PATCH 1/6] Remove non-generic version of parking_lot API

This API is only used in cold paths, so there is no point avoiding
monomorphization costs.
---
 core/src/parking_lot.rs | 58 -----------------------------------------
 1 file changed, 58 deletions(-)

diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs
index 66be2aed..246b958d 100644
--- a/core/src/parking_lot.rs
+++ b/core/src/parking_lot.rs
@@ -558,28 +558,6 @@ where
     B: FnOnce(),
     T: FnOnce(usize, bool),
 {
-    let mut v = Some(validate);
-    let mut b = Some(before_sleep);
-    let mut t = Some(timed_out);
-    park_internal(
-        key,
-        &mut || v.take().unchecked_unwrap()(),
-        &mut || b.take().unchecked_unwrap()(),
-        &mut |key, was_last_thread| t.take().unchecked_unwrap()(key, was_last_thread),
-        park_token,
-        timeout,
-    )
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn park_internal(
-    key: usize,
-    validate: &mut FnMut() -> bool,
-    before_sleep: &mut FnMut(),
-    timed_out: &mut FnMut(usize, bool),
-    park_token: ParkToken,
-    timeout: Option<Instant>,
-) -> ParkResult {
     // Grab our thread data, this also ensures that the hash table exists
     with_thread_data(|thread_data| {
         // Lock the bucket for the given key
@@ -706,15 +684,6 @@ pub unsafe fn unpark_one<C>(key: usize, callback: C) -> UnparkResult
 where
     C: FnOnce(UnparkResult) -> UnparkToken,
 {
-    let mut c = Some(callback);
-    unpark_one_internal(key, &mut |result| c.take().unchecked_unwrap()(result))
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_one_internal(
-    key: usize,
-    callback: &mut FnMut(UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);
 
@@ -869,23 +838,6 @@ where
     V: FnOnce() -> RequeueOp,
     C: FnOnce(RequeueOp, UnparkResult) -> UnparkToken,
 {
-    let mut v = Some(validate);
-    let mut c = Some(callback);
-    unpark_requeue_internal(
-        key_from,
-        key_to,
-        &mut || v.take().unchecked_unwrap()(),
-        &mut |op, r| c.take().unchecked_unwrap()(op, r),
-    )
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_requeue_internal(
-    key_from: usize,
-    key_to: usize,
-    validate: &mut FnMut() -> RequeueOp,
-    callback: &mut FnMut(RequeueOp, UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the two buckets for the given key
     let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to);
 
@@ -1014,16 +966,6 @@ where
     F: FnMut(ParkToken) -> FilterOp,
     C: FnOnce(UnparkResult) -> UnparkToken,
 {
-    let mut c = Some(callback);
-    unpark_filter_internal(key, &mut filter, &mut |r| c.take().unchecked_unwrap()(r))
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_filter_internal(
-    key: usize,
-    filter: &mut FnMut(ParkToken) -> FilterOp,
-    callback: &mut FnMut(UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);

From 9df9119889ad575adbb186efea80197477673aae Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Thu, 3 Jan 2019 20:03:35 +0100
Subject: [PATCH 2/6] Make functions from parking_lot_core #[inline]

These are only ever called from #[inline(never)] functions, so there
shouldn't be too much code duplication.
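Patches 1 and 2 are two sides of the same trade-off, so the pattern that patch 1
deletes is worth spelling out: outlining closures into a single non-generic
function through a trait object saves one copy of the body per closure type, but
costs an indirect call plus an Option dance, which buys nothing on a path that
is cold anyway. A minimal self-contained sketch of the removed pattern
(illustrative names, not the parking_lot API):

    fn with_callback<F: FnOnce() -> u32>(f: F) -> u32 {
        // `FnOnce` cannot be made into a trait object directly, so the
        // closure is smuggled through an `Option` and taken exactly once.
        let mut f = Some(f);
        with_callback_internal(&mut || f.take().unwrap()())
    }

    // Non-generic version to reduce monomorphization cost: compiled once,
    // shared by every instantiation of `with_callback`.
    fn with_callback_internal(f: &mut dyn FnMut() -> u32) -> u32 {
        // ...closure-independent logic would live here...
        f()
    }

    fn main() {
        assert_eq!(with_callback(|| 41 + 1), 42);
    }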
---
 core/src/parking_lot.rs                       | 61 +++++++++++++------
 core/src/thread_parker/generic.rs             |  7 +++
 core/src/thread_parker/linux.rs               |  7 +++
 core/src/thread_parker/unix.rs                | 13 ++++
 core/src/thread_parker/windows/keyed_event.rs |  9 +++
 core/src/thread_parker/windows/mod.rs         | 18 +++++-
 core/src/thread_parker/windows/waitaddress.rs |  6 ++
 core/src/word_lock.rs                         |  2 +
 8 files changed, 102 insertions(+), 21 deletions(-)

diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs
index 246b958d..440dde0d 100644
--- a/core/src/parking_lot.rs
+++ b/core/src/parking_lot.rs
@@ -38,6 +38,7 @@ struct HashTable {
 }
 
 impl HashTable {
+    #[inline]
     fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> {
         let new_size = (num_threads * LOAD_FACTOR).next_power_of_two();
         let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1;
@@ -69,6 +70,7 @@ struct Bucket {
 }
 
 impl Bucket {
+    #[inline]
     pub fn new() -> Self {
         Self {
             mutex: WordLock::new(),
@@ -83,6 +85,7 @@ impl Bucket {
 
 // Implementation of Clone for Bucket, needed to make vec![] work
 impl Clone for Bucket {
+    #[inline]
     fn clone(&self) -> Self {
         Self::new()
@@ -97,6 +100,7 @@ struct FairTimeout {
 }
 
 impl FairTimeout {
+    #[inline]
     fn new() -> FairTimeout {
         FairTimeout {
             timeout: Instant::now(),
@@ -105,6 +109,7 @@ impl FairTimeout {
     }
 
     // Determine whether we should force a fair unlock, and update the timeout
+    #[inline]
     fn should_timeout(&mut self) -> bool {
         let now = Instant::now();
         if now > self.timeout {
@@ -163,6 +168,7 @@ impl ThreadData {
 }
 
 // Invokes the given closure with a reference to the current thread `ThreadData`.
+#[inline(always)]
 fn with_thread_data<F, T>(f: F) -> T
 where
     F: FnOnce(&ThreadData) -> T,
@@ -196,32 +202,41 @@ impl Drop for ThreadData {
 }
 
 // Get a pointer to the latest hash table, creating one if it doesn't exist yet.
+#[inline]
 fn get_hashtable() -> *mut HashTable {
-    let mut table = HASHTABLE.load(Ordering::Acquire);
+    let table = HASHTABLE.load(Ordering::Acquire);
 
     // If there is no table, create one
     if table.is_null() {
-        let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
-
-        // If this fails then it means some other thread created the hash
-        // table first.
-        match HASHTABLE.compare_exchange(
-            ptr::null_mut(),
-            new_table,
-            Ordering::Release,
-            Ordering::Relaxed,
-        ) {
-            Ok(_) => return new_table,
-            Err(x) => table = x,
-        }
+        create_hashtable()
+    } else {
+        table
+    }
+}
 
-        // Free the table we created
-        unsafe {
-            Box::from_raw(new_table);
+// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
+#[cold]
+#[inline(never)]
+fn create_hashtable() -> *mut HashTable {
+    let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
+
+    // If this fails then it means some other thread created the hash
+    // table first.
+    match HASHTABLE.compare_exchange(
+        ptr::null_mut(),
+        new_table,
+        Ordering::Release,
+        Ordering::Relaxed,
+    ) {
+        Ok(_) => new_table,
+        Err(old_table) => {
+            // Free the table we created
+            unsafe {
+                Box::from_raw(new_table);
+            }
+            old_table
         }
     }
-
-    table
 }
 
 // Grow the hash table so that it is big enough for the given number of threads.
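The get_hashtable/create_hashtable split above is the classic fast-path/slow-path
idiom: the #[inline] fast path is a single acquire load, while the racy one-time
allocation is outlined behind #[cold] #[inline(never)] so it adds no code to
callers. A runnable sketch of the same idiom with a trivial payload
(illustrative names; the real table is a HashTable and is intentionally leaked):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    static TABLE: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

    #[inline]
    fn get_table() -> *mut u32 {
        let table = TABLE.load(Ordering::Acquire);
        if table.is_null() { create_table() } else { table }
    }

    #[cold]
    #[inline(never)]
    fn create_table() -> *mut u32 {
        let new_table = Box::into_raw(Box::new(42u32));
        // If this fails, another thread created the table first: free ours
        // and use theirs.
        match TABLE.compare_exchange(
            ptr::null_mut(),
            new_table,
            Ordering::Release,
            Ordering::Relaxed,
        ) {
            Ok(_) => new_table,
            Err(old_table) => {
                unsafe { drop(Box::from_raw(new_table)) };
                old_table
            }
        }
    }

    fn main() {
        // Every caller observes the same pointer once the race settles.
        assert_eq!(get_table(), get_table());
    }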
@@ -312,15 +327,18 @@ unsafe fn grow_hashtable(num_threads: usize) {
 
 // Hash function for addresses
 #[cfg(target_pointer_width = "32")]
+#[inline]
 fn hash(key: usize, bits: u32) -> usize {
     key.wrapping_mul(0x9E3779B9) >> (32 - bits)
 }
 #[cfg(target_pointer_width = "64")]
+#[inline]
 fn hash(key: usize, bits: u32) -> usize {
     key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
 }
 
 // Lock the bucket for the given key
+#[inline]
 unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
     let mut bucket;
     loop {
@@ -345,6 +363,7 @@ unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
 
 // Lock the bucket for the given key, but check that the key hasn't been changed
 // in the meantime due to a requeue.
+#[inline]
 unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
     let mut bucket;
     loop {
@@ -372,6 +391,7 @@ unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
 }
 
 // Lock the two buckets for the given pair of keys
+#[inline]
 unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) {
     let mut bucket1;
     loop {
@@ -412,6 +432,7 @@ unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Buc
 }
 
 // Unlock a pair of buckets
+#[inline]
 unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) {
     if bucket1 as *const _ == bucket2 as *const _ {
         bucket1.mutex.unlock();
@@ -439,6 +460,7 @@ pub enum ParkResult {
 
 impl ParkResult {
     /// Returns true if we were unparked by another thread.
+    #[inline]
     pub fn is_unparked(self) -> bool {
         if let ParkResult::Unparked(_) = self {
             true
@@ -754,6 +776,7 @@ where
 /// You should only call this function with an address that you control, since
 /// you could otherwise interfere with the operation of other synchronization
 /// primitives.
+#[inline]
 pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);
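The hash function above is Fibonacci hashing: 0x9E3779B97F4A7C15 is 2^64 divided
by the golden ratio (0x9E3779B9 is the 32-bit analogue), and the
multiply-then-shift keeps only the top `bits` bits, which is where the
multiplication has mixed in entropy from the whole key. A quick demonstration
that nearby, identically-aligned addresses still spread across buckets (64-bit
variant, hypothetical addresses):

    fn hash(key: usize, bits: u32) -> usize {
        key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
    }

    fn main() {
        // Word-aligned neighbours land in well-separated buckets of a
        // 16-entry table rather than clustering on their low bits.
        for i in 0..4 {
            let key = 0x7fff_0000_1000usize + i * 8;
            println!("{:#x} -> bucket {}", key, hash(key, 4));
        }
    }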
diff --git a/core/src/thread_parker/generic.rs b/core/src/thread_parker/generic.rs
index 45fd1e2b..eedf733e 100644
--- a/core/src/thread_parker/generic.rs
+++ b/core/src/thread_parker/generic.rs
@@ -20,6 +20,7 @@ pub struct ThreadParker {
 impl ThreadParker {
     pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
 
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
             parked: AtomicBool::new(false),
@@ -27,18 +28,21 @@ impl ThreadParker {
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
     pub fn prepare_park(&self) {
         self.parked.store(true, Ordering::Relaxed);
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
+    #[inline]
     pub fn timed_out(&self) -> bool {
         self.parked.load(Ordering::Relaxed) != false
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub fn park(&self) {
         while self.parked.load(Ordering::Acquire) != false {
             spin_loop_hint();
@@ -48,6 +52,7 @@ impl ThreadParker {
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub fn park_until(&self, timeout: Instant) -> bool {
         while self.parked.load(Ordering::Acquire) != false {
             if Instant::now() >= timeout {
@@ -61,6 +66,7 @@ impl ThreadParker {
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub fn unpark_lock(&self) -> UnparkHandle {
         // We don't need to lock anything, just clear the state
         self.parked.store(false, Ordering::Release);
@@ -76,6 +82,7 @@ pub struct UnparkHandle(());
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub fn unpark(self) {}
 }
 
diff --git a/core/src/thread_parker/linux.rs b/core/src/thread_parker/linux.rs
index b4d39b95..040301a3 100644
--- a/core/src/thread_parker/linux.rs
+++ b/core/src/thread_parker/linux.rs
@@ -32,6 +32,7 @@ pub struct ThreadParker {
 impl ThreadParker {
     pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
 
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
             futex: AtomicI32::new(0),
@@ -39,18 +40,21 @@ impl ThreadParker {
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
     pub fn prepare_park(&self) {
         self.futex.store(1, Ordering::Relaxed);
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
+    #[inline]
     pub fn timed_out(&self) -> bool {
         self.futex.load(Ordering::Relaxed) != 0
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub fn park(&self) {
         while self.futex.load(Ordering::Acquire) != 0 {
             self.futex_wait(None);
@@ -60,6 +64,7 @@ impl ThreadParker {
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub fn park_until(&self, timeout: Instant) -> bool {
         while self.futex.load(Ordering::Acquire) != 0 {
             let now = Instant::now();
@@ -111,6 +116,7 @@ impl ThreadParker {
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub fn unpark_lock(&self) -> UnparkHandle {
         // We don't need to lock anything, just clear the state
         self.futex.store(0, Ordering::Release);
@@ -129,6 +135,7 @@ pub struct UnparkHandle {
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub fn unpark(self) {
         // The thread data may have been freed at this point, but it doesn't
         // matter since the syscall will just return EFAULT in that case.
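For reference, the futex protocol that linux.rs wraps is small enough to sketch
in full. This assumes the libc crate on Linux; it mirrors the calls the real
parker makes, with the 1-to-0 transition of the futex word as the unpark signal:

    #[cfg(target_os = "linux")]
    mod futex_sketch {
        use std::ptr;
        use std::sync::atomic::{AtomicI32, Ordering};

        pub fn park(futex: &AtomicI32) {
            // prepare_park stored 1; sleep until an unparker stores 0.
            while futex.load(Ordering::Acquire) != 0 {
                unsafe {
                    libc::syscall(
                        libc::SYS_futex,
                        futex as *const AtomicI32,
                        libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
                        1, // go to sleep only if the word still holds 1
                        ptr::null::<libc::timespec>(),
                    );
                }
            }
        }

        pub fn unpark(futex: &AtomicI32) {
            futex.store(0, Ordering::Release);
            unsafe {
                libc::syscall(
                    libc::SYS_futex,
                    futex as *const AtomicI32,
                    libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
                    1, // wake at most one waiter
                );
            }
        }
    }

The compare against 1 inside the kernel is what makes the wait race-free: if the
unparker stores 0 between our load and the syscall, FUTEX_WAIT returns
immediately instead of sleeping through the wakeup.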
diff --git a/core/src/thread_parker/unix.rs b/core/src/thread_parker/unix.rs
index 57d4e612..1b21d1f9 100644
--- a/core/src/thread_parker/unix.rs
+++ b/core/src/thread_parker/unix.rs
@@ -33,6 +33,7 @@ pub struct ThreadParker {
 impl ThreadParker {
     pub const IS_CHEAP_TO_CONSTRUCT: bool = false;
 
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
             should_park: Cell::new(false),
@@ -44,8 +45,10 @@ impl ThreadParker {
 
     // Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
     #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))]
+    #[inline]
     unsafe fn init(&self) {}
     #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))]
+    #[inline]
     unsafe fn init(&self) {
         let mut attr: libc::pthread_condattr_t = mem::uninitialized();
         let r = libc::pthread_condattr_init(&mut attr);
@@ -59,6 +62,7 @@ impl ThreadParker {
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
     pub unsafe fn prepare_park(&self) {
         self.should_park.set(true);
         if !self.initialized.get() {
@@ -69,6 +73,7 @@ impl ThreadParker {
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
+    #[inline]
    pub unsafe fn timed_out(&self) -> bool {
         // We need to grab the mutex here because another thread may be
         // concurrently executing UnparkHandle::unpark, which is done without
@@ -83,6 +88,7 @@ impl ThreadParker {
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub unsafe fn park(&self) {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
@@ -97,6 +103,7 @@ impl ThreadParker {
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub unsafe fn park_until(&self, timeout: Instant) -> bool {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
@@ -132,6 +139,7 @@ impl ThreadParker {
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub unsafe fn unpark_lock(&self) -> UnparkHandle {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
@@ -143,6 +151,7 @@ impl ThreadParker {
 }
 
 impl Drop for ThreadParker {
+    #[inline]
     fn drop(&mut self) {
         // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
         // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
@@ -175,6 +184,7 @@ pub struct UnparkHandle {
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         (*self.thread_parker).should_park.set(false);
 
@@ -190,6 +200,7 @@ impl UnparkHandle {
 
 // Returns the current time on the clock used by pthread_cond_t as a timespec.
 #[cfg(any(target_os = "macos", target_os = "ios"))]
+#[inline]
 fn timespec_now() -> libc::timespec {
     let mut now: libc::timeval = unsafe { mem::uninitialized() };
     let r = unsafe { libc::gettimeofday(&mut now, ptr::null_mut()) };
@@ -200,6 +211,7 @@ fn timespec_now() -> libc::timespec {
     }
 }
 #[cfg(not(any(target_os = "macos", target_os = "ios")))]
+#[inline]
 fn timespec_now() -> libc::timespec {
     let mut now: libc::timespec = unsafe { mem::uninitialized() };
     let clock = if cfg!(target_os = "android") {
@@ -216,6 +228,7 @@ fn timespec_now() -> libc::timespec {
 
 // Converts a relative timeout into an absolute timeout in the clock used by
 // pthread_cond_t.
+#[inline]
 fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
     // Handle overflows early on
     if timeout.as_secs() > libc::time_t::max_value() as u64 {
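The part of timeout_to_timespec that the hunk only shows the top of is the carry
arithmetic: add a relative Duration to an absolute (tv_sec, tv_nsec) pair,
propagating nanosecond overflow into seconds and bailing out on tv_sec overflow.
A self-contained sketch of that arithmetic (plain i64 fields standing in for the
libc types):

    use std::time::Duration;

    fn add_timeout(now: (i64, i64), timeout: Duration) -> Option<(i64, i64)> {
        // Handle overflows early on
        if timeout.as_secs() > i64::max_value() as u64 {
            return None;
        }
        let mut nsec = now.1 + timeout.subsec_nanos() as i64;
        let mut sec = now.0.checked_add(timeout.as_secs() as i64);
        // Carry nanosecond overflow into the seconds field.
        if nsec >= 1_000_000_000 {
            nsec -= 1_000_000_000;
            sec = sec.and_then(|s| s.checked_add(1));
        }
        sec.map(|s| (s, nsec))
    }

    fn main() {
        assert_eq!(
            add_timeout((10, 900_000_000), Duration::from_millis(200)),
            Some((11, 100_000_000))
        );
        assert_eq!(add_timeout((i64::max_value(), 0), Duration::from_secs(1)), None);
    }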
diff --git a/core/src/thread_parker/windows/keyed_event.rs b/core/src/thread_parker/windows/keyed_event.rs
index 5a3a11f4..bb8bcb11 100644
--- a/core/src/thread_parker/windows/keyed_event.rs
+++ b/core/src/thread_parker/windows/keyed_event.rs
@@ -40,10 +40,12 @@ pub struct KeyedEvent {
 }
 
 impl KeyedEvent {
+    #[inline]
     unsafe fn wait_for(&self, key: PVOID, timeout: PLARGE_INTEGER) -> NTSTATUS {
         (self.NtWaitForKeyedEvent)(self.handle, key, 0, timeout)
     }
 
+    #[inline]
     unsafe fn release(&self, key: PVOID) -> NTSTATUS {
         (self.NtReleaseKeyedEvent)(self.handle, key, 0, ptr::null_mut())
     }
@@ -97,19 +99,23 @@ impl KeyedEvent {
         }
     }
 
+    #[inline]
     pub fn prepare_park(&'static self, key: &AtomicUsize) {
         key.store(STATE_PARKED, Ordering::Relaxed);
     }
 
+    #[inline]
     pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
         key.load(Ordering::Relaxed) == STATE_TIMED_OUT
     }
 
+    #[inline]
     pub unsafe fn park(&'static self, key: &AtomicUsize) {
         let status = self.wait_for(key as *const _ as PVOID, ptr::null_mut());
         debug_assert_eq!(status, STATUS_SUCCESS);
     }
 
+    #[inline]
     pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
         let now = Instant::now();
         if timeout <= now {
@@ -155,6 +161,7 @@ impl KeyedEvent {
         false
     }
 
+    #[inline]
     pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
         // If the state was STATE_PARKED then we need to wake up the thread
         if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
@@ -172,6 +179,7 @@ impl KeyedEvent {
 }
 
 impl Drop for KeyedEvent {
+    #[inline]
     fn drop(&mut self) {
         unsafe {
             let ok = CloseHandle(self.handle);
@@ -191,6 +199,7 @@ pub struct UnparkHandle {
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         if !self.key.is_null() {
             let status = self.keyed_event.release(self.key as PVOID);
diff --git a/core/src/thread_parker/windows/mod.rs b/core/src/thread_parker/windows/mod.rs
index 0f2ef099..537501d3 100644
--- a/core/src/thread_parker/windows/mod.rs
+++ b/core/src/thread_parker/windows/mod.rs
@@ -18,16 +18,23 @@ enum Backend {
     WaitAddress(waitaddress::WaitAddress),
 }
 
+static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
+
 impl Backend {
+    #[inline]
     fn get() -> &'static Backend {
-        static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
-
         // Fast path: use the existing object
         let backend_ptr = BACKEND.load(Ordering::Acquire);
         if !backend_ptr.is_null() {
             return unsafe { &*backend_ptr };
         };
 
+        Backend::create()
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn create() -> &'static Backend {
         // Try to create a new Backend
         let backend;
         if let Some(waitaddress) = waitaddress::WaitAddress::create() {
@@ -70,6 +77,7 @@ pub struct ThreadParker {
 impl ThreadParker {
     pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
 
+    #[inline]
     pub fn new() -> ThreadParker {
         // Initialize the backend here to ensure we don't get any panics
         // later on, which could leave synchronization primitives in a broken
@@ -81,6 +89,7 @@ impl ThreadParker {
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
     pub fn prepare_park(&self) {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
@@ -90,6 +99,7 @@ impl ThreadParker {
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
+    #[inline]
     pub fn timed_out(&self) -> bool {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
@@ -99,6 +109,7 @@ impl ThreadParker {
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub unsafe fn park(&self) {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.park(&self.key),
@@ -109,6 +120,7 @@ impl ThreadParker {
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub unsafe fn park_until(&self, timeout: Instant) -> bool {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
@@ -119,6 +131,7 @@ impl ThreadParker {
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub unsafe fn unpark_lock(&self) -> UnparkHandle {
         match *self.backend {
             Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
@@ -138,6 +151,7 @@ pub enum UnparkHandle {
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         match self {
             UnparkHandle::KeyedEvent(x) => x.unpark(),
diff --git a/core/src/thread_parker/windows/waitaddress.rs b/core/src/thread_parker/windows/waitaddress.rs
index 91e49541..cd449c5c 100644
--- a/core/src/thread_parker/windows/waitaddress.rs
+++ b/core/src/thread_parker/windows/waitaddress.rs
@@ -56,14 +56,17 @@ impl WaitAddress {
         }
     }
 
+    #[inline]
     pub fn prepare_park(&'static self, key: &AtomicUsize) {
         key.store(1, Ordering::Relaxed);
     }
 
+    #[inline]
     pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
         key.load(Ordering::Relaxed) != 0
     }
 
+    #[inline]
     pub fn park(&'static self, key: &AtomicUsize) {
         while key.load(Ordering::Acquire) != 0 {
             let r = self.wait_on_address(key, INFINITE);
@@ -71,6 +74,7 @@ impl WaitAddress {
         }
     }
 
+    #[inline]
     pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
         while key.load(Ordering::Acquire) != 0 {
             let now = Instant::now();
@@ -97,6 +101,7 @@ impl WaitAddress {
         true
     }
 
+    #[inline]
     pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
         // We don't need to lock anything, just clear the state
         key.store(0, Ordering::Release);
@@ -130,6 +135,7 @@ pub struct UnparkHandle {
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub fn unpark(self) {
         (self.waitaddress.WakeByAddressSingle)(self.key as PVOID);
     }
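The two entry points behind wait_on_address and WakeByAddressSingle only exist
on Windows 8 and later, which is why WaitAddress::create() probes for them at
runtime and the keyed-event backend remains as a fallback. For reference,
statically-linked declarations would look roughly like this (a sketch; the
signatures follow the Win32 documentation, but parking_lot itself resolves the
functions dynamically rather than linking against them):

    use core::ffi::c_void;

    #[cfg(windows)]
    #[link(name = "synchronization")]
    extern "system" {
        // Blocks while *address still compares equal to *compare_address,
        // so a wake between the caller's load and this call cannot be lost.
        fn WaitOnAddress(
            address: *mut c_void,
            compare_address: *mut c_void,
            address_size: usize,
            dw_milliseconds: u32,
        ) -> i32;
        // Wakes at most one thread blocked in WaitOnAddress on `address`.
        fn WakeByAddressSingle(address: *mut c_void);
    }

Like the futex backend, the kernel re-checks the compared value before sleeping,
which is what makes the load-then-wait loop in park race-free.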
diff --git a/core/src/word_lock.rs b/core/src/word_lock.rs
index 7a6b9608..ae8b249b 100644
--- a/core/src/word_lock.rs
+++ b/core/src/word_lock.rs
@@ -36,6 +36,7 @@ struct ThreadData {
 }
 
 impl ThreadData {
+    #[inline]
     fn new() -> ThreadData {
         assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK);
         ThreadData {
@@ -48,6 +49,7 @@ impl ThreadData {
 }
 
 // Invokes the given closure with a reference to the current thread `ThreadData`.
+#[inline]
 fn with_thread_data<F, T>(f: F) -> T
 where
     F: FnOnce(&ThreadData) -> T,

From 293e7112909ebf5b1b8a415a26a58bb08964330a Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 2 Feb 2019 16:17:14 +0000
Subject: [PATCH 3/6] Improve Debug for locked mutexes/rwlocks

---
 lock_api/src/mutex.rs   | 13 ++++++++++++-
 lock_api/src/remutex.rs | 13 ++++++++++++-
 lock_api/src/rwlock.rs  | 13 ++++++++++++-
 src/mutex.rs            |  2 +-
 src/rwlock.rs           |  2 +-
 5 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/lock_api/src/mutex.rs b/lock_api/src/mutex.rs
index 435cf2be..003b5302 100644
--- a/lock_api/src/mutex.rs
+++ b/lock_api/src/mutex.rs
@@ -271,7 +271,18 @@ impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self.try_lock() {
             Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
-            None => f.pad("Mutex { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Mutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
diff --git a/lock_api/src/remutex.rs b/lock_api/src/remutex.rs
index bf37ed85..33e1422b 100644
--- a/lock_api/src/remutex.rs
+++ b/lock_api/src/remutex.rs
@@ -338,7 +338,18 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for Reentra
                 .debug_struct("ReentrantMutex")
                 .field("data", &&*guard)
                 .finish(),
-            None => f.pad("ReentrantMutex { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("ReentrantMutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
diff --git a/lock_api/src/rwlock.rs b/lock_api/src/rwlock.rs
index adbd69ae..998b89d2 100644
--- a/lock_api/src/rwlock.rs
+++ b/lock_api/src/rwlock.rs
@@ -670,7 +670,18 @@ impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self.try_read() {
             Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
-            None => f.pad("RwLock { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("RwLock")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
diff --git a/src/mutex.rs b/src/mutex.rs
index 0e26570a..99498e10 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -294,6 +294,6 @@ mod tests {
             }"
         );
         let _lock = mutex.lock();
-        assert_eq!(format!("{:?}", mutex), "Mutex { <locked> }");
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
     }
 }
diff --git a/src/rwlock.rs b/src/rwlock.rs
index f9709245..63ef4271 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -555,7 +555,7 @@ mod tests {
             }"
         );
         let _lock = x.write();
-        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
+        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
     }
 
     #[test]
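Patch 3's placeholder trick deserves a note: debug_struct fields must implement
Debug, so a one-off zero-sized type is declared inline whose Debug impl prints
<locked>, keeping the output shape identical whether or not the lock is held.
A standalone sketch of the same strategy over std's Mutex:

    use std::fmt;
    use std::sync::Mutex;

    struct DebugMutex<T>(Mutex<T>);

    impl<T: fmt::Debug> fmt::Debug for DebugMutex<T> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            match self.0.try_lock() {
                Ok(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
                Err(_) => {
                    // A zero-sized stand-in whose Debug output is "<locked>".
                    struct LockedPlaceholder;
                    impl fmt::Debug for LockedPlaceholder {
                        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                            f.write_str("<locked>")
                        }
                    }
                    f.debug_struct("Mutex")
                        .field("data", &LockedPlaceholder)
                        .finish()
                }
            }
        }
    }

    fn main() {
        let m = DebugMutex(Mutex::new(0u8));
        println!("{:?}", m); // Mutex { data: 0 }
        // try_lock from the thread that holds the lock fails rather than
        // blocking, so the placeholder branch is taken.
        let _guard = m.0.lock().unwrap();
        println!("{:?}", m); // Mutex { data: <locked> }
    }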
From 61f3ec3be7456e7f52b89c23bf112b1c0bc0eff6 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 2 Feb 2019 16:17:52 +0000
Subject: [PATCH 4/6] Update example in Mutex

---
 src/mutex.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mutex.rs b/src/mutex.rs
index 99498e10..0756c675 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -69,7 +69,7 @@ use raw_mutex::RawMutex;
 ///
 /// let (tx, rx) = channel();
 /// for _ in 0..10 {
-///     let (data, tx) = (data.clone(), tx.clone());
+///     let (data, tx) = (Arc::clone(&data), tx.clone());
 ///     thread::spawn(move || {
 ///         // The shared state can only be accessed once the lock is held.
 ///         // Our non-atomic increment is safe because we're the only thread

From e62e709472db8875a7dfd7565abfd670593ada73 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 2 Feb 2019 16:18:28 +0000
Subject: [PATCH 5/6] Add message on #[must_use]

---
 lock_api/src/mutex.rs   |  4 ++--
 lock_api/src/remutex.rs |  4 ++--
 lock_api/src/rwlock.rs  | 10 +++++-----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/lock_api/src/mutex.rs b/lock_api/src/mutex.rs
index 003b5302..7e69c8b2 100644
--- a/lock_api/src/mutex.rs
+++ b/lock_api/src/mutex.rs
@@ -292,7 +292,7 @@ impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` and `DerefMut` implementations.
-#[must_use]
+#[must_use = "if unused the Mutex will immediately unlock"]
 pub struct MutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
     mutex: &'a Mutex<R, T>,
     marker: PhantomData<(&'a mut T, R::GuardMarker)>,
@@ -449,7 +449,7 @@ unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
+#[must_use = "if unused the Mutex will immediately unlock"]
 pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
     raw: &'a R,
     data: *mut T,
diff --git a/lock_api/src/remutex.rs b/lock_api/src/remutex.rs
index 33e1422b..42ec4977 100644
--- a/lock_api/src/remutex.rs
+++ b/lock_api/src/remutex.rs
@@ -359,7 +359,7 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for Reentra
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` implementation.
-#[must_use]
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
 pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
     remutex: &'a ReentrantMutex<R, G, T>,
     marker: PhantomData<(&'a T, GuardNoSend)>,
@@ -524,7 +524,7 @@ unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAdd
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
 pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
     raw: &'a RawReentrantMutex<R, G>,
     data: *const T,
diff --git a/lock_api/src/rwlock.rs b/lock_api/src/rwlock.rs
index 998b89d2..a90504e5 100644
--- a/lock_api/src/rwlock.rs
+++ b/lock_api/src/rwlock.rs
@@ -688,7 +688,7 @@ impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
 
 /// RAII structure used to release the shared read access of a lock when
 /// dropped.
-#[must_use]
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a T, R::GuardMarker)>,
@@ -835,7 +835,7 @@ unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadG
 
 /// RAII structure used to release the exclusive write access of a lock when
 /// dropped.
-#[must_use]
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a mut T, R::GuardMarker)>,
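The reason string shows up directly in the compiler diagnostic, which is the
whole point of patch 5: a bare #[must_use] only says the value is unused, while
the message explains the consequence. A tiny demonstration (a toy guard, not the
lock_api type):

    #[must_use = "if unused the Mutex will immediately unlock"]
    struct MutexGuard;

    impl Drop for MutexGuard {
        fn drop(&mut self) {
            println!("unlocked");
        }
    }

    fn lock() -> MutexGuard {
        MutexGuard
    }

    fn main() {
        // warning: unused `MutexGuard` that must be used
        //   note: if unused the Mutex will immediately unlock
        lock(); // prints "unlocked" right away: the guard is dropped here
    }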
@@ -1023,7 +1023,7 @@ unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWrite
 
 /// RAII structure used to release the upgradable read access of a lock when
 /// dropped.
-#[must_use]
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a T, R::GuardMarker)>,
@@ -1221,7 +1221,7 @@ unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
     raw: &'a R,
     data: *const T,
@@ -1334,7 +1334,7 @@ unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
     raw: &'a R,
     data: *mut T,

From 318d6f348d794863cc7cb6d7c66ff57805748809 Mon Sep 17 00:00:00 2001
From: Amanieu d'Antras
Date: Sat, 2 Feb 2019 16:18:42 +0000
Subject: [PATCH 6/6] Implement Debug and Display on guards

---
 lock_api/src/mutex.rs   | 26 ++++++++++++
 lock_api/src/remutex.rs | 32 +++++++++++++++
 lock_api/src/rwlock.rs  | 76 +++++++++++++++++++++++++++++++++++
 3 files changed, 134 insertions(+)

diff --git a/lock_api/src/mutex.rs b/lock_api/src/mutex.rs
index 7e69c8b2..b643dac9 100644
--- a/lock_api/src/mutex.rs
+++ b/lock_api/src/mutex.rs
@@ -439,6 +439,18 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
     }
 }
 
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
 
@@ -559,5 +571,19 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
     }
 }
 
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedMutexGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
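All of the guard impls in patch 6 follow one shape: the guard already derefs to
the protected T, so Debug and Display simply forward through the Deref.
Forwarding with fmt::Debug::fmt(&**self, f) rather than deriving is deliberate:
a derive would print the guard's own fields and demand bounds on R, whereas this
prints exactly what the protected data prints. A self-contained sketch of the
shape:

    use std::fmt;
    use std::ops::Deref;

    struct Guard<'a, T: ?Sized>(&'a T);

    impl<'a, T: ?Sized> Deref for Guard<'a, T> {
        type Target = T;
        fn deref(&self) -> &T {
            self.0
        }
    }

    impl<'a, T: fmt::Debug + ?Sized> fmt::Debug for Guard<'a, T> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            fmt::Debug::fmt(&**self, f) // forward to T's Debug
        }
    }

    impl<'a, T: fmt::Display + ?Sized> fmt::Display for Guard<'a, T> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            (**self).fmt(f) // resolves to T's Display via the trait bound
        }
    }

    fn main() {
        let s = String::from("hello");
        assert_eq!(format!("{:?}", Guard(&s)), "\"hello\"");
        assert_eq!(format!("{}", Guard(&s)), "hello");
    }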
diff --git a/lock_api/src/remutex.rs b/lock_api/src/remutex.rs
index 42ec4977..e2dfdd88 100644
--- a/lock_api/src/remutex.rs
+++ b/lock_api/src/remutex.rs
@@ -511,6 +511,22 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
     }
 }
 
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
     for ReentrantMutexGuard<'a, R, G, T>
@@ -634,6 +650,22 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
     }
 }
 
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
     for MappedReentrantMutexGuard<'a, R, G, T>
diff --git a/lock_api/src/rwlock.rs b/lock_api/src/rwlock.rs
index a90504e5..d32998d6 100644
--- a/lock_api/src/rwlock.rs
+++ b/lock_api/src/rwlock.rs
@@ -830,6 +830,20 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
 
@@ -1018,6 +1032,20 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T>
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
 
@@ -1208,6 +1236,22 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableRead
     }
 }
 
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
     for RwLockUpgradableReadGuard<'a, R, T>
@@ -1321,6 +1365,22 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
     for MappedRwLockReadGuard<'a, R, T>
@@ -1464,6 +1524,22 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a,
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
     for MappedRwLockWriteGuard<'a, R, T>
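Taken together, patches 3 and 6 make locked state inspectable end to end.
Assuming a parking_lot build with this series applied, the observable behaviour
is:

    use parking_lot::Mutex;

    fn main() {
        let m = Mutex::new(vec![1, 2, 3]);
        println!("{:?}", m); // Mutex { data: [1, 2, 3] }

        let guard = m.lock();
        println!("{:?}", m);     // Mutex { data: <locked> }  (patch 3)
        println!("{:?}", guard); // [1, 2, 3]                 (patch 6)
    }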