Skip to content

Commit

Permalink
feat(maitake-sync): mutex-traits integration (#482)
Browse files Browse the repository at this point in the history
Currently, `maitake-sync`'s blocking `Mutex` and `RwLock` use spinlocks
to wait for the lock to become available. This works on (almost) all
platforms,[^cas] but there are a number of reasons it might be less than
desirable. For example:

- You might want an IRQ-safe mutex so that data protected by the mutex
  can also be shared with interrupt handlers. In that case, the mutex
  must also disable IRQs when locked, and re-enable them when unlocked.
- You might want to use an alternative mechanism for waiting, such as
  hardware lock elision, an operating-system mutex when not on
  `#![no_std]`, or an existing RTOS library's mutex or similar.
- You might be on a strictly single-threaded platform where any
  "waiting" is inherently a deadlock, and you would prefer to make any
  attempt to lock an already-locked mutex into a panic instead.

Therefore, @jamesmunns and I have written the [`mutex-traits`] crate, to
abstract over mutex implementations. This trait is similar to
[`lock_api`], but with a `ScopedRawMutex` trait to support mutexen based
on [`critical-section`], which cannot be locked and unlocked at
arbitrary times and instead requires a closure.

This branch updates `maitake-sync` to use [`mutex-traits`]. In
particular:

- The blocking `Mutex` type is generic over a `Lock` type parameter,
  which may implement `mutex_traits::RawMutex` or
  `mutex_traits::ScopedRawMutex`.
- `Mutex` has scoped lock methods when `Lock: ScopedRawMutex`, and guard
  methods require `Lock: RawMutex`.
- All async primitives that use a blocking `Mutex` are now generic over
  a `Lock` type for the underlying mutex. Everything except for
  `Semaphore` and `RwLock` only requires `ScopedRawMutex`, but
  `Semaphore` and `RwLock` require `RawMutex`.
- By default, `Mutex`es are constructed with a `DefaultMutex` as the
  `ScopedRawMutex` impl, which tries to pick the right behavior based on
  features/target config. This only implements `ScopedRawMutex`.

[`mutex-traits`]: https://crates.io/crates/mutex-traits
[`lock_api`]: https://crates.io/crates/lock-api
[`critical-section`]: https://crates.io/crates/critical-section
[^cas]: Provided they have atomic CAS...

BREAKING CHANGE:

Renamed `spin::Mutex` and `spin::RwLock` to `blocking::Mutex` and
`blocking::RwLock`.

`blocking::Mutex::new` now constructs a
`blocking::Mutex<T, DefaultMutex>`, which only provides scoped lock
methods. To construct a mutex that also has RAII guard lock methods,
use `blocking::Mutex::new_with_raw_mutex` to provide a `RawMutex`
implementation.
  • Loading branch information
hawkw authored Aug 10, 2024
1 parent 0692c10 commit 99da7e1
Show file tree
Hide file tree
Showing 44 changed files with 2,855 additions and 1,134 deletions.
18 changes: 18 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -104,4 +104,4 @@ tracing-core = { git = "https://github.com/tokio-rs/tracing" }
[profile.loom]
inherits = "test"
lto = true
opt-level = 3
opt-level = 3
114 changes: 60 additions & 54 deletions alloc/src/buddy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use mycelium_util::intrusive::{list, Linked, List};
use mycelium_util::math::Logarithm;
use mycelium_util::sync::{
atomic::{AtomicUsize, Ordering::*},
spin,
blocking::Mutex,
};

#[derive(Debug)]
Expand All @@ -41,7 +41,7 @@ pub struct Alloc<const FREE_LISTS: usize> {
/// Array of free lists by "order". The order of an block is the number
/// of times the minimum page size must be doubled to reach that block's
/// size.
free_lists: [spin::Mutex<List<Free>>; FREE_LISTS],
free_lists: [Mutex<List<Free>>; FREE_LISTS],
}

type Result<T> = core::result::Result<T, AllocErr>;
Expand All @@ -65,7 +65,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
//
// see https://github.com/rust-lang/rust-clippy/issues/7665
#[allow(clippy::declare_interior_mutable_const)]
const ONE_FREE_LIST: spin::Mutex<List<Free>> = spin::Mutex::new(List::new());
const ONE_FREE_LIST: Mutex<List<Free>> = Mutex::new(List::new());

// ensure we don't split memory into regions too small to fit the free
// block header in them.
Expand Down Expand Up @@ -190,16 +190,12 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
let _span =
tracing::debug_span!("free_list", order, size = self.size_for_order(order),)
.entered();
match list.try_lock() {
Some(list) => {
for entry in list.iter() {
tracing::debug!("entry={entry:?}");
}
}
None => {
tracing::debug!("<THIS IS THE ONE WHERE THE PANIC HAPPENED LOL>");
list.try_with_lock(|list| {
for entry in list.iter() {
tracing::debug!("entry={entry:?}");
}
}
})
.unwrap_or_else(|| tracing::debug!("<THIS IS THE ONE WHERE THE PANIC HAPPENED LOL>"))
}
}

Expand Down Expand Up @@ -283,29 +279,35 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
tracing::trace!(curr_order = idx + order);

// Is there an available block on this free list?
let mut free_list = free_list.lock();
if let Some(mut block) = free_list.pop_back() {
let block = unsafe { block.as_mut() };
tracing::trace!(?block, ?free_list, "found");

// Unless this is the free list on which we'd expect to find a
// block of the requested size (the first free list we checked),
// the block is larger than the requested allocation. In that
// case, we'll need to split it down and push the remainder onto
// the appropriate free lists.
if idx > 0 {
let curr_order = idx + order;
tracing::trace!(?curr_order, ?order, "split down");
self.split_down(block, curr_order, order);
}
let allocated = free_list.with_lock(|free_list| {
if let Some(mut block) = free_list.pop_back() {
let block = unsafe { block.as_mut() };
tracing::trace!(?block, ?free_list, "found");

// Unless this is the free list on which we'd expect to find a
// block of the requested size (the first free list we checked),
// the block is larger than the requested allocation. In that
// case, we'll need to split it down and push the remainder onto
// the appropriate free lists.
if idx > 0 {
let curr_order = idx + order;
tracing::trace!(?curr_order, ?order, "split down");
self.split_down(block, curr_order, order);
}

// Change the block's magic to indicate that it is allocated, so
// that we can avoid checking the free list if we try to merge
// it before the first word is written to.
block.make_busy();
tracing::trace!(?block, "made busy");
self.allocated_size.fetch_add(block.size(), Release);
return Some(block.into());
// Change the block's magic to indicate that it is allocated, so
// that we can avoid checking the free list if we try to merge
// it before the first word is written to.
block.make_busy();
tracing::trace!(?block, "made busy");
self.allocated_size.fetch_add(block.size(), Release);
Some(block.into())
} else {
None
}
});
if let Some(block) = allocated {
return Some(block);
}
}
None
Expand Down Expand Up @@ -334,25 +336,29 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
// Starting at the minimum order on which the freed range will fit
for (idx, free_list) in self.free_lists.as_ref()[min_order..].iter().enumerate() {
let curr_order = idx + min_order;
let mut free_list = free_list.lock();

// Is there a free buddy block at this order?
if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, &mut free_list) } {
// Okay, merge the blocks, and try the next order!
if buddy < block {
mem::swap(&mut block, &mut buddy);
}
unsafe {
block.as_mut().merge(buddy.as_mut());
let done = free_list.with_lock(|free_list| {
// Is there a free buddy block at this order?
if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, free_list) } {
// Okay, merge the blocks, and try the next order!
if buddy < block {
mem::swap(&mut block, &mut buddy);
}
unsafe {
block.as_mut().merge(buddy.as_mut());
}
tracing::trace!(?buddy, ?block, "merged with buddy");
// Keep merging!
false
} else {
// Okay, we can't keep merging, so push the block on the current
// free list.
free_list.push_front(block);
tracing::trace!("deallocated block");
self.allocated_size.fetch_sub(size, Release);
true
}
tracing::trace!(?buddy, ?block, "merged with buddy");
// Keep merging!
} else {
// Okay, we can't keep merging, so push the block on the current
// free list.
free_list.push_front(block);
tracing::trace!("deallocated block");
self.allocated_size.fetch_sub(size, Release);
});
if done {
return Ok(());
}
}
Expand All @@ -369,7 +375,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
if order > free_lists.len() {
todo!("(eliza): choppity chop chop down the block!");
}
free_lists[order].lock().push_front(block);
free_lists[order].with_lock(|list| list.push_front(block));
let mut sz = self.heap_size.load(Acquire);
while let Err(actual) =
// TODO(eliza): if this overflows that's bad news lol...
Expand Down Expand Up @@ -473,7 +479,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
.split_back(size, self.offset())
.expect("block too small to split!");
tracing::trace!(?block, ?new_block);
free_lists[order].lock().push_front(new_block);
free_lists[order].with_lock(|list| list.push_front(new_block));
}
}
}
Expand Down
3 changes: 2 additions & 1 deletion hal-core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
tracing = { git = "https://github.com/tokio-rs/tracing", default_features = false }
tracing = { git = "https://github.com/tokio-rs/tracing", default_features = false }
maitake-sync = { path = "../maitake-sync", default-features = false }
mycelium-util = { path = "../util" }
embedded-graphics-core = { version = "0.3", optional = true }
5 changes: 4 additions & 1 deletion hal-core/src/framebuffer.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
use core::ops::{Deref, DerefMut};
use maitake_sync::blocking;

#[cfg(feature = "embedded-graphics-core")]
#[doc(cfg(feature = "embedded-graphics-core"))]
mod embedded_graphics;
#[cfg(feature = "embedded-graphics-core")]
#[doc(cfg(feature = "embedded-graphics-core"))]
pub use self::embedded_graphics::*;

pub trait Draw {
/// Return the width of the framebuffer in pixels.
fn width(&self) -> usize;
Expand Down Expand Up @@ -185,9 +187,10 @@ macro_rules! deref_draw_body {
};
}

impl<'lock, D> Draw for mycelium_util::sync::spin::MutexGuard<'lock, D>
impl<'lock, D, L> Draw for blocking::MutexGuard<'lock, D, L>
where
D: Draw,
L: blocking::RawMutex,
{
deref_draw_body! {}
}
Expand Down
18 changes: 11 additions & 7 deletions hal-x86_64/src/interrupt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,11 @@ use hal_core::interrupt::Control;
use hal_core::interrupt::{ctx, Handlers};
use mycelium_util::{
bits, fmt,
sync::{spin, InitOnce},
sync::{
blocking::{Mutex, MutexGuard},
spin::Spinlock,
InitOnce,
},
};

pub mod apic;
Expand Down Expand Up @@ -56,7 +60,7 @@ pub struct Interrupt<T = ()> {
enum InterruptModel {
/// Interrupts are handled by the [8259 Programmable Interrupt Controller
/// (PIC)](pic).
Pic(spin::Mutex<pic::CascadedPic>),
Pic(Mutex<pic::CascadedPic, Spinlock>),
/// Interrupts are handled by the [local] and [I/O] [Advanced Programmable
/// Interrupt Controller (APIC)s][apics].
///
Expand All @@ -68,7 +72,7 @@ enum InterruptModel {
// TODO(eliza): allow further configuration of the I/O APIC (e.g.
// masking/unmasking stuff...)
#[allow(dead_code)]
io: spin::Mutex<apic::IoApic>,
io: Mutex<apic::IoApic, Spinlock>,
},
}

Expand Down Expand Up @@ -127,13 +131,13 @@ pub struct Registers {
_pad2: [u16; 3],
}

static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static IDT: Mutex<idt::Idt, Spinlock> = Mutex::new_with_raw_mutex(idt::Idt::new(), Spinlock::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();

impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;

pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
pub fn idt() -> MutexGuard<'static, idt::Idt, Spinlock> {
IDT.lock()
}

Expand Down Expand Up @@ -207,7 +211,7 @@ impl Controller {

InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
io: Mutex::new_with_raw_mutex(io, Spinlock::new()),
}
}
model => {
Expand All @@ -225,7 +229,7 @@ impl Controller {
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
InterruptModel::Pic(Mutex::new_with_raw_mutex(pics, Spinlock::new()))
}
};
tracing::trace!(interrupt_model = ?model);
Expand Down
12 changes: 8 additions & 4 deletions hal-x86_64/src/serial.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,11 @@ use crate::cpu;
use core::{fmt, marker::PhantomData};
use mycelium_util::{
io,
sync::{spin, Lazy},
sync::{
blocking::{Mutex, MutexGuard},
spin::Spinlock,
Lazy,
},
};

static COM1: Lazy<Option<Port>> = Lazy::new(|| Port::new(0x3F8).ok());
Expand All @@ -29,7 +33,7 @@ pub fn com4() -> Option<&'static Port> {

// #[derive(Debug)]
pub struct Port {
inner: spin::Mutex<Registers>,
inner: Mutex<Registers, Spinlock>,
}

// #[derive(Debug)]
Expand All @@ -40,7 +44,7 @@ pub struct Lock<'a, B = Blocking> {
}

struct LockInner<'a> {
inner: spin::MutexGuard<'a, Registers>,
inner: MutexGuard<'a, Registers, Spinlock>,
prev_divisor: Option<u16>,
}

Expand Down Expand Up @@ -110,7 +114,7 @@ impl Port {
})?;

Ok(Self {
inner: spin::Mutex::new(registers),
inner: Mutex::new_with_raw_mutex(registers, Spinlock::new()),
})
}

Expand Down
4 changes: 2 additions & 2 deletions hal-x86_64/src/time/pit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use core::{
use mycelium_util::{
bits::{bitfield, enum_from_bits},
fmt,
sync::spin::Mutex,
sync::{blocking::Mutex, spin::Spinlock},
};

/// Intel 8253/8254 Programmable Interval Timer (PIT).
Expand Down Expand Up @@ -210,7 +210,7 @@ enum_from_bits! {
/// publicly and is represented as a singleton. It's stored in a [`Mutex`] in
/// order to ensure that multiple CPU cores don't try to write conflicting
/// configurations to the PIT's configuration ports.
pub static PIT: Mutex<Pit> = Mutex::new(Pit::new());
pub static PIT: Mutex<Pit, Spinlock> = Mutex::new_with_raw_mutex(Pit::new(), Spinlock::new());

/// Are we currently sleeping on an interrupt?
static SLEEPING: AtomicBool = AtomicBool::new(false);
Expand Down
Loading

0 comments on commit 99da7e1

Please sign in to comment.