Skip to content

Commit

Permalink
fix!: consume node on lock APIs (#4)
Browse files Browse the repository at this point in the history
  • Loading branch information
pedromfedricci authored Sep 30, 2024
1 parent a09cc3e commit 53d1ea4
Show file tree
Hide file tree
Showing 11 changed files with 200 additions and 101 deletions.
18 changes: 6 additions & 12 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ jobs:
--target ${{ env.NO_STD_TARGET }}
--no-dev-deps
--feature-powerset
--skip yield,thread_local
--skip yield
msrv:
name: MSRV
Expand All @@ -58,17 +58,6 @@ jobs:
- name: Check MSRV
run: cargo check --all-features

semver:
name: Check semver
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Rust stable
run: rustup toolchain install stable
- name: Check semver violations
uses: obi1kenobi/cargo-semver-checks-action@v2

docsrs:
name: Build doc
runs-on: ubuntu-latest
Expand Down Expand Up @@ -166,6 +155,11 @@ jobs:
uses: taiki-e/install-action@nextest
- name: Miri test
run: cargo miri nextest run --all-features
- name: Miri test ignore leaks
env:
MIRIFLAGS: "-Zmiri-ignore-leaks"
RUSTFLAGS: "--cfg ignore_leaks"
run: cargo miri test raw::mutex::test_leaks_expected

loom:
name: Loom
Expand Down
19 changes: 19 additions & 0 deletions .github/workflows/semver.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
name: Semver

on:
workflow_dispatch:

env:
CARGO_TERM_COLOR: always

jobs:
semver:
name: Check semver
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Rust stable
run: rustup toolchain install stable
- name: Check semver violations
uses: obi1kenobi/cargo-semver-checks-action@v2
4 changes: 2 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ An implementation of Craig and, independently, Magnusson, Landin, and
Hagersten queue lock for mutual exclusion, referred to as CLH lock.
"""
name = "clhlock"
version = "0.2.0-rc1"
version = "0.2.0"
edition = "2021"
# NOTE: Rust 1.70 is required for `AtomicPtr::as_ptr`.
rust-version = "1.70.0"
Expand All @@ -29,4 +29,4 @@ rustdoc-args = ["--cfg", "docsrs"]

[lints.rust.unexpected_cfgs]
level = "warn"
check-cfg = ["cfg(tarpaulin)", "cfg(tarpaulin_include)", "cfg(loom)"]
check-cfg = ["cfg(tarpaulin)", "cfg(tarpaulin_include)", "cfg(loom)", "cfg(ignore_leaks)"]
9 changes: 8 additions & 1 deletion Makefile.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ default_to_workspace = false
[tasks.no-std]
command = "cargo"
args = ["hack", "build", "--target", "thumbv7m-none-eabi", "--feature-powerset",
"--no-dev-deps", "--skip", "yield,thread_local"]
"--no-dev-deps", "--skip", "yield"]

# Build docs for docs.rs.
[tasks.docsrs]
Expand Down Expand Up @@ -63,6 +63,13 @@ install_crate = { rustup_component_name = "miri" }
command = "cargo"
args = ["miri", "nextest", "run", "--all-features", "${@}"]

[tasks.miri-ignore-leaks]
toolchain = "nightly"
install_crate = { rustup_component_name = "miri" }
command = "cargo"
env = { "MIRIFLAGS" = "-Zmiri-ignore-leaks", "RUSTFLAGS" = "--cfg ignore_leaks" }
args = ["miri", "test", "raw::mutex::test_leaks_expected"]

# Check code coverage with tarpaulin (all features).
[tasks.tarpaulin]
command = "cargo"
Expand Down
14 changes: 7 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ Or add an entry under the `[dependencies]` section in your `Cargo.toml`:

[dependencies]
# Available features: `yield`.
clhlock = { version = "0.2.0-rc1", features = ["yield"] }
clhlock = { version = "0.2", features = ["yield"] }
```

## Documentation
Expand Down Expand Up @@ -92,15 +92,15 @@ fn main() {
let c_mutex = Arc::clone(&mutex);

thread::spawn(move || {
// A queue node handle must be mutably accessible.
let mut node = MutexNode::new();
*c_mutex.lock(&mut node) = 10;
// A queue node handle must be consumed.
let node = MutexNode::new();
*c_mutex.lock(node) = 10;
})
.join().expect("thread::spawn failed");

// A queue node handle must be mutably accessible.
let mut node = MutexNode::new();
assert_eq!(*mutex.lock(&mut node), 10);
// A queue node handle must be consumed.
let node = MutexNode::new();
assert_eq!(*mutex.lock(node), 10);
}
```

Expand Down
12 changes: 6 additions & 6 deletions examples/raw.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,15 @@ fn main() {
for _ in 0..N {
let (data, tx) = (data.clone(), tx.clone());
thread::spawn(move || {
// A queue node must be mutably accessible.
let mut node = MutexNode::new();
// A queue node must be consumed.
let node = MutexNode::new();
// The shared state can only be accessed once the lock is held.
// Our non-atomic increment is safe because we're the only thread
// which can access the shared state when the lock is held.
//
// We unwrap() the return value to assert that we are not expecting
// threads to ever fail while holding the lock.
let mut guard = data.lock(&mut node);
let mut guard = data.lock(node);
*guard += 1;
if *guard == N {
tx.send(()).unwrap();
Expand All @@ -36,9 +36,9 @@ fn main() {
}
let _message = rx.recv();

// A queue node must be mutably accessible.
let mut node = MutexNode::new();
let count = data.lock(&mut node);
// A queue node must be consumed.
let node = MutexNode::new();
let count = data.lock(node);
assert_eq!(*count, N);
// lock is unlocked here when `count` goes out of scope.
}
55 changes: 41 additions & 14 deletions src/inner/raw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ impl<L: Lock> MutexNodeInner<L> {

/// A pointer type that points to a heap allocated queue node.
#[derive(Debug)]
#[repr(transparent)]
pub struct MutexNode<L> {
inner: NonNull<MutexNodeInner<L>>,
}
Expand Down Expand Up @@ -126,7 +127,9 @@ impl<L: Lock> MutexNode<L> {
}

/// Creates a new, heap allocated `MutexNodeInner` and returns a leaked,
/// raw pointer to it. Caller is responsible for freeing the node.
/// raw pointer to it.
///
/// Caller is responsible for freeing the node.
fn unlocked() -> *mut MutexNodeInner<L> {
let node = MutexNodeInner::unlocked();
Box::into_raw(Box::new(node))
Expand Down Expand Up @@ -178,7 +181,7 @@ impl<T, L: Lock, W> Mutex<T, L, W> {

impl<T: ?Sized, L: Lock, W: Wait> Mutex<T, L, W> {
/// Acquires this mutex, blocking the current thread until it is able to do so.
pub fn lock<'a>(&'a self, node: &'a mut MutexNode<L>) -> MutexGuard<'a, T, L, W> {
pub fn lock(&self, mut node: MutexNode<L>) -> MutexGuard<'_, T, L, W> {
// SAFETY: The inner pointer always points to valid nodes allocations
// and we have exclusive access over the node since it has not been
// added to the waiting queue yet.
Expand Down Expand Up @@ -222,9 +225,9 @@ impl<T: ?Sized, L, W> Mutex<T, L, W> {

impl<T: ?Sized + Debug, L: Lock, W: Wait> Debug for Mutex<T, L, W> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let mut node = MutexNode::new();
let node = MutexNode::new();
let mut d = f.debug_struct("Mutex");
self.lock(&mut node).with(|data| d.field("data", &data));
self.lock(node).with(|data| d.field("data", &data));
d.finish()
}
}
Expand All @@ -233,7 +236,7 @@ impl<T: ?Sized + Debug, L: Lock, W: Wait> Debug for Mutex<T, L, W> {
/// dropped (falls out of scope), the lock will be unlocked.
pub struct MutexGuard<'a, T: ?Sized, L: Lock, W> {
lock: &'a Mutex<T, L, W>,
head: &'a mut MutexNode<L>,
head: MutexNode<L>,
}

// Rust's `std::sync::MutexGuard` is not Send for pthread compatibility, but this
Expand All @@ -243,7 +246,7 @@ unsafe impl<T: ?Sized + Sync, L: Lock, W> Sync for MutexGuard<'_, T, L, W> {}

impl<'a, T: ?Sized, L: Lock, W> MutexGuard<'a, T, L, W> {
/// Creates a new `MutexGuard` instance.
fn new(lock: &'a Mutex<T, L, W>, head: &'a mut MutexNode<L>) -> Self {
const fn new(lock: &'a Mutex<T, L, W>, head: MutexNode<L>) -> Self {
Self { lock, head }
}

Expand All @@ -255,23 +258,47 @@ impl<'a, T: ?Sized, L: Lock, W> MutexGuard<'a, T, L, W> {
// SAFETY: A guard instance holds the lock locked.
unsafe { self.lock.data.with_unchecked(f) }
}
}

impl<'a, T: ?Sized, L: Lock, W> Drop for MutexGuard<'a, T, L, W> {
/// Unlocks the guard and returns a node instance that can be reused by
/// another locking operation.
///
/// Consumes the guard without calling `drop`.
#[must_use]
pub fn into_node(mut self) -> MutexNode<L> {
// SAFETY: We are only ever calling unlock once, since we "forget" the
// guard, therefore the guard's `drop` call will not be called.
unsafe { self.unlock() }
let inner = self.head.inner;
core::mem::forget(self);
MutexNode { inner }
}

/// Unlocks the mutex associated with this guard.
#[inline]
fn drop(&mut self) {
///
/// # Safety
///
/// This function must not be called twice over the same `self` reference.
unsafe fn unlock(&mut self) {
// SAFETY: The inner pointer always points to valid nodes allocations.
let inner = unsafe { self.head.inner.as_ref() };
let prev = inner.prev.get();
inner.lock.notify();
// SAFETY: The memory was allocated through the Box API, therefore it
// fulfills the layout requirements. The pointer is guaranteed to not
// be null, since the tail is initialized with a valid allocation, and
// all tail updates point to valid, heap allocated nodes. The drop call
// is only ever run once for any value, and this instance is the only
// pointer to this heap allocated node at drop call time.
unsafe { MutexNode::set(self.head, prev) }
// all tail updates point to valid, heap allocated nodes. The caller
// guaranteed that this function is only ever called once over the same
// self reference.
unsafe { MutexNode::set(&mut self.head, prev) }
}
}

impl<'a, T: ?Sized, L: Lock, W> Drop for MutexGuard<'a, T, L, W> {
#[inline]
fn drop(&mut self) {
// The drop call is only ever run once for any value, and this instance
// is the only pointer to this heap allocated node at drop call time.
unsafe { self.unlock() }
}
}

Expand Down
12 changes: 6 additions & 6 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,15 +58,15 @@
//! let c_mutex = Arc::clone(&mutex);
//!
//! thread::spawn(move || {
//! // A queue node handle must be mutably accessible.
//! let mut node = MutexNode::new();
//! *c_mutex.lock(&mut node) = 10;
//! // A queue node handle must be consumed.
//! let node = MutexNode::new();
//! *c_mutex.lock(node) = 10;
//! })
//! .join().expect("thread::spawn failed");
//!
//! // A queue node handle must be mutably accessible.
//! let mut node = MutexNode::new();
//! assert_eq!(*mutex.lock(&mut node), 10);
//! // A queue node handle must be consumed.
//! let node = MutexNode::new();
//! assert_eq!(*mutex.lock(node), 10);
//! ```
//!
//! ## Features
Expand Down
20 changes: 10 additions & 10 deletions src/raw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,8 @@ pub mod spins {
/// use clhlock::raw::{spins::Mutex, MutexNode};
///
/// let mutex = Mutex::new(0);
/// let mut node = MutexNode::new();
/// let guard = mutex.lock(&mut node);
/// let node = MutexNode::new();
/// let guard = mutex.lock(node);
/// assert_eq!(*guard, 0);
/// ```
/// [`raw::Mutex`]: mutex::Mutex
Expand Down Expand Up @@ -74,8 +74,8 @@ pub mod spins {
/// use clhlock::raw::{spins::backoff::Mutex, MutexNode};
///
/// let mutex = Mutex::new(0);
/// let mut node = MutexNode::new();
/// let guard = mutex.lock(&mut node);
/// let node = MutexNode::new();
/// let guard = mutex.lock(node);
/// assert_eq!(*guard, 0);
/// ```
/// [`raw::Mutex`]: mutex::Mutex
Expand Down Expand Up @@ -107,8 +107,8 @@ pub mod yields {
/// use clhlock::raw::{yields::Mutex, MutexNode};
///
/// let mutex = Mutex::new(0);
/// let mut node = MutexNode::new();
/// let guard = mutex.lock(&mut node);
/// let node = MutexNode::new();
/// let guard = mutex.lock(node);
/// assert_eq!(*guard, 0);
/// ```
/// [`raw::Mutex`]: mutex::Mutex
Expand Down Expand Up @@ -136,8 +136,8 @@ pub mod yields {
/// use clhlock::raw::{yields::backoff::Mutex, MutexNode};
///
/// let mutex = Mutex::new(0);
/// let mut node = MutexNode::new();
/// let guard = mutex.lock(&mut node);
/// let node = MutexNode::new();
/// let guard = mutex.lock(node);
/// assert_eq!(*guard, 0);
/// ```
/// [`raw::Mutex`]: mutex::Mutex
Expand Down Expand Up @@ -167,8 +167,8 @@ pub mod loops {
/// use clhlock::raw::{loops::Mutex, MutexNode};
///
/// let mutex = Mutex::new(0);
/// let mut node = MutexNode::new();
/// let guard = mutex.lock(&mut node);
/// let node = MutexNode::new();
/// let guard = mutex.lock(node);
/// assert_eq!(*guard, 0);
/// ```
/// [`raw::Mutex`]: mutex::Mutex
Expand Down
Loading

0 comments on commit 53d1ea4

Please sign in to comment.