Skip to content

Commit

Permalink
Auto merge of #51529 - nodakai:improve-sys_common-mutex, r=oli-obk
Browse files Browse the repository at this point in the history
libstd: add an RAII utility for sys_common::mutex::Mutex

It is indeed debatable whether or not we should introduce more sophistication like this to the lowest layer of a system library. In fact, `Drop::drop()` cannot be `unsafe` (IIRC there was a discussion on introducing an unsafe variant of `Drop` whose entire scope must be within `unsafe`)
  • Loading branch information
bors committed Jun 17, 2018
2 parents 2b973e6 + b81da27 commit 86a8f1a
Show file tree
Hide file tree
Showing 9 changed files with 73 additions and 69 deletions.
21 changes: 11 additions & 10 deletions src/libstd/io/lazy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ pub struct Lazy<T> {
init: fn() -> Arc<T>,
}

#[inline]
const fn done<T>() -> *mut Arc<T> {
    // Distinguished sentinel pointer (address 1, never dereferenced) used to
    // mark the lazily-initialized value as already torn down.
    1 as *mut Arc<T>
}

unsafe impl<T> Sync for Lazy<T> {}

impl<T: Send + Sync + 'static> Lazy<T> {
Expand All @@ -33,17 +36,15 @@ impl<T: Send + Sync + 'static> Lazy<T> {

/// Returns a clone of the stored `Arc<T>`, initializing it on first use.
///
/// Returns `None` once the pointer has been set to the `done()` sentinel
/// (i.e. after the at-exit handler has torn the value down), so late callers
/// never receive a dangling `Arc`.
pub fn get(&'static self) -> Option<Arc<T>> {
    unsafe {
        // RAII guard: the lock is released on drop, including on the
        // panic path out of init().
        let _guard = self.lock.lock();
        let ptr = self.ptr.get();
        if ptr.is_null() {
            // First access: allocate the value (init() also registers the
            // at-exit destructor).
            Some(self.init())
        } else if ptr == done() {
            // Cleanup already ran; refuse to hand the value back out.
            None
        } else {
            Some((*ptr).clone())
        }
    }
}

Expand All @@ -53,10 +54,10 @@ impl<T: Send + Sync + 'static> Lazy<T> {
// the at exit handler). Otherwise we just return the freshly allocated
// `Arc`.
let registered = sys_common::at_exit(move || {
self.lock.lock();
let ptr = self.ptr.get();
self.ptr.set(1 as *mut _);
self.lock.unlock();
let ptr = {
let _guard = self.lock.lock();
self.ptr.replace(done())
};
drop(Box::from_raw(ptr))
});
let ret = (self.init)();
Expand Down
4 changes: 2 additions & 2 deletions src/libstd/sync/mutex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ impl<T: ?Sized> Mutex<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
unsafe {
self.inner.lock();
self.inner.raw_lock();
MutexGuard::new(self)
}
}
Expand Down Expand Up @@ -454,7 +454,7 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
unsafe {
self.__lock.poison.done(&self.__poison);
self.__lock.inner.unlock();
self.__lock.inner.raw_unlock();
}
}
}
Expand Down
16 changes: 6 additions & 10 deletions src/libstd/sys/redox/args.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,17 +73,15 @@ mod imp {
CStr::from_ptr(*argv.offset(i) as *const libc::c_char).to_bytes().to_vec()
}).collect();

LOCK.lock();
let _guard = LOCK.lock();
let ptr = get_global_ptr();
assert!((*ptr).is_none());
(*ptr) = Some(box args);
LOCK.unlock();
}

/// Drops the stored argument vector; subsequent `clone()` calls observe `None`.
///
/// # Safety
/// Must not race with other unguarded access to the global slot; the guard
/// serializes against `init()` and `clone()`, which also take `LOCK`.
pub unsafe fn cleanup() {
    // Guard unlocks on drop, even if dropping the boxed args panics.
    let _guard = LOCK.lock();
    *get_global_ptr() = None;
}

pub fn args() -> Args {
Expand All @@ -96,16 +94,14 @@ mod imp {

/// Takes a deep snapshot of the stored argument vector under `LOCK`.
///
/// Returns `None` when the global slot is empty (before `init()` or after
/// `cleanup()`).
fn clone() -> Option<Vec<Vec<u8>>> {
    unsafe {
        // Guard releases LOCK on drop, covering the whole deep copy.
        let _guard = LOCK.lock();
        let ptr = get_global_ptr();
        (*ptr).as_ref().map(|s| (**s).clone())
    }
}

fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
unsafe fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
mem::transmute(&GLOBAL_ARGS_PTR)
}

}
14 changes: 5 additions & 9 deletions src/libstd/sys/unix/args.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,17 +82,15 @@ mod imp {
static LOCK: Mutex = Mutex::new();

/// Records `argc`/`argv` at startup so `args()` can read them later.
///
/// # Safety
/// `argv` must remain valid for as long as `args()` may be called —
/// presumably the pointers handed to `main` (confirm at the call site).
pub unsafe fn init(argc: isize, argv: *const *const u8) {
    // Guard keeps these writes serialized with clone()'s reads; it
    // unlocks on drop.
    let _guard = LOCK.lock();
    ARGC = argc;
    ARGV = argv;
}

/// Resets the cached `argc`/`argv` so later `args()` calls see an empty list.
///
/// # Safety
/// Must be serialized with `init()`/`clone()`; the guard takes the same `LOCK`.
pub unsafe fn cleanup() {
    let _guard = LOCK.lock();
    ARGC = 0;
    ARGV = ptr::null();
}

pub fn args() -> Args {
Expand All @@ -104,13 +102,11 @@ mod imp {

fn clone() -> Vec<OsString> {
unsafe {
LOCK.lock();
let ret = (0..ARGC).map(|i| {
let _guard = LOCK.lock();
(0..ARGC).map(|i| {
let cstr = CStr::from_ptr(*ARGV.offset(i) as *const libc::c_char);
OsStringExt::from_vec(cstr.to_bytes().to_vec())
}).collect();
LOCK.unlock();
return ret
}).collect()
}
}
}
Expand Down
26 changes: 9 additions & 17 deletions src/libstd/sys/unix/os.rs
Original file line number Diff line number Diff line change
Expand Up @@ -409,10 +409,9 @@ pub unsafe fn environ() -> *mut *const *const c_char {
/// environment variables of the current process.
pub fn env() -> Env {
unsafe {
ENV_LOCK.lock();
let _guard = ENV_LOCK.lock();
let mut environ = *environ();
if environ == ptr::null() {
ENV_LOCK.unlock();
panic!("os::env() failure getting env string from OS: {}",
io::Error::last_os_error());
}
Expand All @@ -423,12 +422,10 @@ pub fn env() -> Env {
}
environ = environ.offset(1);
}
let ret = Env {
return Env {
iter: result.into_iter(),
_dont_send_or_sync_me: PhantomData,
};
ENV_LOCK.unlock();
return ret
}
}

fn parse(input: &[u8]) -> Option<(OsString, OsString)> {
Expand All @@ -452,15 +449,14 @@ pub fn getenv(k: &OsStr) -> io::Result<Option<OsString>> {
// always None as well
let k = CString::new(k.as_bytes())?;
unsafe {
ENV_LOCK.lock();
let _guard = ENV_LOCK.lock();
let s = libc::getenv(k.as_ptr()) as *const libc::c_char;
let ret = if s.is_null() {
None
} else {
Some(OsStringExt::from_vec(CStr::from_ptr(s).to_bytes().to_vec()))
};
ENV_LOCK.unlock();
return Ok(ret)
Ok(ret)
}
}

Expand All @@ -469,21 +465,17 @@ pub fn setenv(k: &OsStr, v: &OsStr) -> io::Result<()> {
let v = CString::new(v.as_bytes())?;

unsafe {
ENV_LOCK.lock();
let ret = cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(|_| ());
ENV_LOCK.unlock();
return ret
let _guard = ENV_LOCK.lock();
cvt(libc::setenv(k.as_ptr(), v.as_ptr(), 1)).map(|_| ())
}
}

/// Removes `n` from the process environment while holding `ENV_LOCK`.
///
/// # Errors
/// Returns an error if `n` contains an interior NUL byte, or if the
/// underlying `libc::unsetenv` call fails (mapped through `cvt`).
pub fn unsetenv(n: &OsStr) -> io::Result<()> {
    let nbuf = CString::new(n.as_bytes())?;

    unsafe {
        // Guard serializes environment mutation and unlocks on drop —
        // including when cvt() maps a libc failure to Err.
        let _guard = ENV_LOCK.lock();
        cvt(libc::unsetenv(nbuf.as_ptr())).map(|_| ())
    }
}

Expand Down
27 changes: 14 additions & 13 deletions src/libstd/sys_common/at_exit_imp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

use boxed::FnBox;
use ptr;
use mem;
use sys_common::mutex::Mutex;

type Queue = Vec<Box<FnBox()>>;
Expand All @@ -25,6 +26,8 @@ type Queue = Vec<Box<FnBox()>>;
static LOCK: Mutex = Mutex::new();
static mut QUEUE: *mut Queue = ptr::null_mut();

const DONE: *mut Queue = 1_usize as *mut _;

// The maximum number of times the cleanup routines will be run. While running
// the at_exit closures new ones may be registered, and this count is the number
// of times the new closures will be allowed to register successfully. After
Expand All @@ -35,7 +38,7 @@ unsafe fn init() -> bool {
if QUEUE.is_null() {
let state: Box<Queue> = box Vec::new();
QUEUE = Box::into_raw(state);
} else if QUEUE as usize == 1 {
} else if QUEUE == DONE {
// can't re-init after a cleanup
return false
}
Expand All @@ -44,18 +47,18 @@ unsafe fn init() -> bool {
}

pub fn cleanup() {
for i in 0..ITERS {
for i in 1..=ITERS {
unsafe {
LOCK.lock();
let queue = QUEUE;
QUEUE = if i == ITERS - 1 {1} else {0} as *mut _;
LOCK.unlock();
let queue = {
let _guard = LOCK.lock();
mem::replace(&mut QUEUE, if i == ITERS { DONE } else { ptr::null_mut() })
};

// make sure we're not recursively cleaning up
assert!(queue as usize != 1);
assert!(queue != DONE);

// If we never called init, not need to cleanup!
if queue as usize != 0 {
if !queue.is_null() {
let queue: Box<Queue> = Box::from_raw(queue);
for to_run in *queue {
to_run();
Expand All @@ -66,15 +69,13 @@ pub fn cleanup() {
}

pub fn push(f: Box<FnBox()>) -> bool {
let mut ret = true;
unsafe {
LOCK.lock();
let _guard = LOCK.lock();
if init() {
(*QUEUE).push(f);
true
} else {
ret = false;
false
}
LOCK.unlock();
}
ret
}
26 changes: 24 additions & 2 deletions src/libstd/sys_common/mutex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,15 @@ impl Mutex {
/// Behavior is undefined if the mutex has been moved between this and any
/// previous function call.
#[inline]
pub unsafe fn lock(&self) { self.0.lock() }
pub unsafe fn raw_lock(&self) { self.0.lock() }

/// Calls raw_lock() and then returns an RAII guard to guarantee the mutex
/// will be unlocked.
///
/// NOTE(review): the guard borrows the inner mutex, so it cannot outlive
/// `self`; the matching unlock runs in MutexGuard::drop, even on panic.
#[inline]
pub unsafe fn lock(&self) -> MutexGuard {
// Order matters: acquire the lock first, only then construct the guard
// whose Drop will release it.
self.raw_lock();
MutexGuard(&self.0)
}

/// Attempts to lock the mutex without blocking, returning whether it was
/// successfully acquired or not.
Expand All @@ -51,8 +59,11 @@ impl Mutex {
///
/// Behavior is undefined if the current thread does not actually hold the
/// mutex.
///
/// Consider switching from the pair of raw_lock() and raw_unlock() to
/// lock() whenever possible.
#[inline]
pub unsafe fn unlock(&self) { self.0.unlock() }
pub unsafe fn raw_unlock(&self) { self.0.unlock() }

/// Deallocates all resources associated with this mutex.
///
Expand All @@ -64,3 +75,14 @@ impl Mutex {

// not meant to be exported to the outside world, just the containing module
// NOTE(review): hands out the raw imp::Mutex, bypassing the MutexGuard RAII —
// callers of this accessor take on manual lock/unlock responsibility.
pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 }

#[must_use]
/// A simple RAII utility for the above Mutex without the poisoning semantics.
// Holds a borrow of the OS-level mutex and unlocks it in its Drop impl.
// #[must_use] because an immediately-dropped guard would unlock right away.
pub struct MutexGuard<'a>(&'a imp::Mutex);

impl<'a> Drop for MutexGuard<'a> {
#[inline]
fn drop(&mut self) {
// SAFETY: a MutexGuard is only created by Mutex::lock() after raw_lock()
// succeeded, so the current thread holds the mutex when drop runs.
unsafe { self.0.unlock(); }
}
}
3 changes: 1 addition & 2 deletions src/libstd/sys_common/thread_local.rs
Original file line number Diff line number Diff line change
Expand Up @@ -162,13 +162,12 @@ impl StaticKey {
// we just simplify the whole branch.
if imp::requires_synchronized_create() {
static INIT_LOCK: Mutex = Mutex::new();
INIT_LOCK.lock();
let _guard = INIT_LOCK.lock();
let mut key = self.key.load(Ordering::SeqCst);
if key == 0 {
key = imp::create(self.dtor) as usize;
self.key.store(key, Ordering::SeqCst);
}
INIT_LOCK.unlock();
rtassert!(key != 0);
return key
}
Expand Down
5 changes: 1 addition & 4 deletions src/libstd/thread/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -935,20 +935,17 @@ impl ThreadId {
static mut COUNTER: u64 = 0;

unsafe {
GUARD.lock();
let _guard = GUARD.lock();

// If we somehow use up all our bits, panic so that we're not
// covering up subtle bugs of IDs being reused.
if COUNTER == ::u64::MAX {
GUARD.unlock();
panic!("failed to generate unique thread ID: bitspace exhausted");
}

let id = COUNTER;
COUNTER += 1;

GUARD.unlock();

ThreadId(id)
}
}
Expand Down

0 comments on commit 86a8f1a

Please sign in to comment.