Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implemented various backoffs and per-CPU rng #130

Merged
merged 1 commit into from
Jul 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ members = [
"modules/ruxruntime",
"modules/ruxtask",
"modules/ruxfutex",
"modules/ruxrand",

"api/ruxfeat",
"api/arceos_api",
Expand Down
5 changes: 4 additions & 1 deletion crates/spinlock/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@
name = "spinlock"
version = "0.1.0"
edition = "2021"
authors = ["Yuekai Jia <[email protected]>"]
authors = [
"Yuekai Jia <[email protected]>",
"Igna <[email protected]>",
]
description = "`no_std` spin lock implementation that can disable kernel local IRQs or preemption while locking"
license = "GPL-3.0-or-later OR Apache-2.0"
homepage = "https://github.com/rcore-os/arceos"
Expand Down
51 changes: 37 additions & 14 deletions crates/spinlock/src/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,23 @@ use core::sync::atomic::{AtomicBool, Ordering};

use kernel_guard::BaseGuard;

use crate::{strategy, Strategy};

/// The default strategy used in spinlocks.
pub type DefaultStrategy = strategy::Once;

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually
/// exclusive access to data.
///
/// This is a base struct, the specific behavior depends on the generic
/// parameter `G` that implements [`BaseGuard`], such as whether to disable
/// local IRQs or kernel preemption before acquiring the lock.
/// local IRQs or kernel preemption before acquiring the lock. The parameter `S`
/// that implements [`Strategy`] defines the behavior when encountering contention.
///
/// For single-core environment (without the "smp" feature), we remove the lock
/// state, CPU can always get the lock if we follow the proper guard in use.
pub struct BaseSpinLock<G: BaseGuard, T: ?Sized> {
_phantom: PhantomData<G>,
pub struct BaseSpinLock<DG: BaseGuard, T: ?Sized, S: Strategy = DefaultStrategy> {
_phantom: PhantomData<(DG, S)>,
#[cfg(feature = "smp")]
lock: AtomicBool,
data: UnsafeCell<T>,
Expand All @@ -52,10 +58,10 @@ pub struct BaseSpinLockGuard<'a, G: BaseGuard, T: ?Sized + 'a> {
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<G: BaseGuard, T: ?Sized + Send> Sync for BaseSpinLock<G, T> {}
unsafe impl<G: BaseGuard, T: ?Sized + Send> Send for BaseSpinLock<G, T> {}
unsafe impl<G: BaseGuard, T: ?Sized + Send, B: Strategy> Sync for BaseSpinLock<G, T, B> {}
unsafe impl<G: BaseGuard, T: ?Sized + Send, B: Strategy> Send for BaseSpinLock<G, T, B> {}

impl<G: BaseGuard, T> BaseSpinLock<G, T> {
impl<G: BaseGuard, T, S: Strategy> BaseSpinLock<G, T, S> {
/// Creates a new [`BaseSpinLock`] wrapping the supplied data.
#[inline(always)]
pub const fn new(data: T) -> Self {
Expand All @@ -77,26 +83,33 @@ impl<G: BaseGuard, T> BaseSpinLock<G, T> {
}
}

impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
/// Locks the [`BaseSpinLock`] and returns a guard that permits access to the inner data.
impl<G: BaseGuard, T: ?Sized, S: Strategy> BaseSpinLock<G, T, S> {
/// Locks the [`BaseSpinLock`] using the given guard type and backoff strategy,
/// and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
#[inline(always)]
pub fn lock(&self) -> BaseSpinLockGuard<G, T> {
let irq_state = G::acquire();
pub fn lock_as<GT: BaseGuard, ST: Strategy>(&self) -> BaseSpinLockGuard<GT, T> {
let irq_state = GT::acquire();

#[cfg(feature = "smp")]
{
use crate::strategy::{Backoff, Relax};

let mut backoff = <ST as Strategy>::new_backoff();
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
while self
.lock
.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
backoff.backoff();
let mut relax = <ST as Strategy>::new_relax();

// Wait until the lock looks unlocked before retrying
while self.is_locked() {
core::hint::spin_loop();
relax.relax();
}
}
}
Expand All @@ -109,6 +122,16 @@ impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
}
}

/// Locks the [`BaseSpinLock`] using the "default" strategy specified by lock type,
/// and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
#[inline(always)]
pub fn lock(&self) -> BaseSpinLockGuard<G, T> {
    // Delegate to `lock_as`, pinning the guard type and contention strategy
    // to this lock's own generic parameters `G` and `S`.
    self.lock_as::<G, S>()
}

/// Returns `true` if the lock is currently held.
///
/// # Safety
Expand Down Expand Up @@ -183,14 +206,14 @@ impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
}
}

impl<G: BaseGuard, T: ?Sized + Default> Default for BaseSpinLock<G, T> {
impl<G: BaseGuard, T: ?Sized + Default, S: Strategy> Default for BaseSpinLock<G, T, S> {
#[inline(always)]
fn default() -> Self {
Self::new(Default::default())
}
}

impl<G: BaseGuard, T: ?Sized + fmt::Debug> fmt::Debug for BaseSpinLock<G, T> {
impl<G: BaseGuard, T: ?Sized + fmt::Debug, S: Strategy> fmt::Debug for BaseSpinLock<G, T, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "SpinLock {{ data: ")
Expand Down
14 changes: 11 additions & 3 deletions crates/spinlock/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,23 @@
//! environment (without this feature), the lock state is unnecessary and
//! optimized out. CPU can always get the lock if we follow the proper guard
//! in use. By default, this feature is disabled.
//! - `rand`: Provide extra contention-alleviating strategy using exponential
//! backoff algorithm. The user is responsible for providing the random number
//! generator implementation.

#![cfg_attr(not(test), no_std)]

mod base;

use kernel_guard::{NoOp, NoPreempt, NoPreemptIrqSave};
/// Defines the strategies used when encountering lock contention.
pub mod strategy;

use kernel_guard::{NoPreempt, NoPreemptIrqSave};

pub use self::base::{BaseSpinLock, BaseSpinLockGuard};

pub use self::strategy::*;

/// A spin lock that disables kernel preemption while trying to lock, and
/// re-enables it after unlocking.
///
Expand All @@ -48,7 +56,7 @@ pub type SpinNoIrqGuard<'a, T> = BaseSpinLockGuard<'a, NoPreemptIrqSave, T>;
///
/// It must be used in the preemption-disabled and local IRQ-disabled context,
/// or never be used in interrupt handlers.
pub type SpinRaw<T> = BaseSpinLock<NoOp, T>;
pub type SpinRaw<T> = BaseSpinLock<kernel_guard::NoOp, T>;

/// A guard that provides mutable data access for [`SpinRaw`].
pub type SpinRawGuard<'a, T> = BaseSpinLockGuard<'a, NoOp, T>;
pub type SpinRawGuard<'a, T> = BaseSpinLockGuard<'a, kernel_guard::NoOp, T>;
161 changes: 161 additions & 0 deletions crates/spinlock/src/strategy.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
/* Copyright (c) [2023] [Syswonder Community]
* [Ruxos] is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/

use core::marker::PhantomData;

// Re-exports extra strategies when feature enabled.
#[cfg(feature = "rand")]
pub use crate::rand_strategy::*;

/// Spin for the current `*current_limit` iterations, then double the stored
/// limit for the next round, capped at `max` so the wait cannot grow
/// unboundedly (classic capped exponential backoff).
#[inline(always)]
fn exp_backoff(current_limit: &mut u32, max: u32) {
    let limit = *current_limit;
    // BUGFIX: the original wrote `max.max(limit << 1)`, which takes the
    // *larger* of the cap and the doubled limit — every call jumped straight
    // to `max` and then kept doubling past it. Use `min` to cap the growth,
    // and `saturating_mul` so the doubling itself cannot wrap.
    *current_limit = limit.saturating_mul(2).min(max);
    for _ in 0..limit {
        core::hint::spin_loop();
    }
}

/// Defines the backoff behavior of a spinlock.
pub trait Backoff {
    /// Backoff behavior when failed to acquire the lock.
    ///
    /// Invoked once after each failed attempt to take the lock; an
    /// implementation may spin, pause, or do nothing, and may keep state
    /// across calls (e.g. an exponentially growing wait) via `&mut self`.
    fn backoff(&mut self);
}

/// Defines the relax behavior of a spinlock.
pub trait Relax {
    /// Relax behavior when the lock seemed still held.
    ///
    /// Invoked repeatedly inside the read-only wait loop that watches the
    /// lock state before the next acquisition attempt; state can be kept
    /// across calls via `&mut self`.
    fn relax(&mut self);
}

/// Defines the lock behavior when encountering contention.
/// [`Backoff::backoff`] is called when failed to acquire the lock, and
/// [`Relax::relax`] is called when the lock seemed still held.
///
/// One can easily define a new [`Strategy`] impl that
/// combines existing backoff/relax behaviors.
pub trait Strategy {
    /// The type that defines the relax behavior.
    type Relax: Relax;

    /// The type that defines the backoff behavior.
    type Backoff: Backoff;

    /// Create a new relax state every time after failed to acquire the lock.
    fn new_relax() -> Self::Relax;

    /// Create a new backoff state every time after the locking procedure began.
    fn new_backoff() -> Self::Backoff;
}

/// Blanket impl: any default-constructible type that is both a [`Relax`] and
/// a [`Backoff`] is a complete [`Strategy`], serving as its own state for
/// both roles.
impl<T: Relax + Backoff + Default> Strategy for T {
    type Relax = T;
    type Backoff = T;

    #[inline(always)]
    fn new_relax() -> Self::Relax {
        // Fresh relax state == the type's default value.
        Default::default()
    }

    #[inline(always)]
    fn new_backoff() -> Self::Backoff {
        // Fresh backoff state == the type's default value.
        Default::default()
    }
}

/// Do nothing when backoff/relax is required.
/// It can be used as a baseline, or under rare circumstances be used as a
/// performance improvement.
///
/// Note that under most modern CPU design, not using any backoff/relax strategy
/// would normally make things slower.
///
/// Carries no state, so it derives `Default` for use with the blanket
/// [`Strategy`] impl.
#[derive(Debug, Default)]
pub struct NoOp;

/// Call [`core::hint::spin_loop`] once when backoff/relax is required.
///
/// This may improve performance by, for example, reducing bus traffic while
/// spinning. The exact behavior and benefits depend on the machine.
#[derive(Debug, Default)]
pub struct Once;

/// Call [`core::hint::spin_loop`] with exponentially increased time when
/// backoff/relax is required.
///
/// The inner `u32` is the current spin-iteration limit (starting at 1, see
/// the `Default` impl); `MAX` is the intended upper bound on that limit.
///
/// This would generally increase performance when the lock is highly contended.
#[derive(Debug)]
pub struct Exp<const MAX: u32>(u32);

/// Combines a [`Relax`] and a [`Backoff`] into a strategy.
///
/// The `R: Relax` / `B: Backoff` bounds are intentionally *not* repeated on
/// the struct itself (they live only on its `Strategy` impl, which needs
/// them): bounds on a struct definition force every mention of the type to
/// restate them without adding any safety.
#[derive(Debug, Default)]
pub struct Combine<R, B>(PhantomData<(R, B)>);

impl Relax for NoOp {
    // Intentionally empty: pure busy-spin with no pause between retries.
    #[inline(always)]
    fn relax(&mut self) {}
}

impl Backoff for NoOp {
    // Intentionally empty: retry immediately after a failed acquisition.
    #[inline(always)]
    fn backoff(&mut self) {}
}

impl Relax for Once {
    #[inline(always)]
    fn relax(&mut self) {
        // Emit the architecture's spin-wait hint exactly once per wait-loop
        // iteration.
        core::hint::spin_loop();
    }
}

impl Backoff for Once {
    #[inline(always)]
    fn backoff(&mut self) {
        // Emit the architecture's spin-wait hint exactly once per failed
        // acquisition attempt.
        core::hint::spin_loop();
    }
}

impl<const N: u32> Relax for Exp<N> {
    #[inline(always)]
    fn relax(&mut self) {
        // Spin for the current limit (held in self.0), then grow the limit
        // exponentially toward the cap `N`.
        exp_backoff(&mut self.0, N);
    }
}

impl<const N: u32> Backoff for Exp<N> {
    #[inline(always)]
    fn backoff(&mut self) {
        // Same state machine as the Relax impl: spin for the current limit
        // (self.0), then grow it exponentially toward the cap `N`.
        exp_backoff(&mut self.0, N);
    }
}

impl<const N: u32> Default for Exp<N> {
    /// Start with a one-iteration spin; each subsequent backoff call doubles
    /// the stored limit.
    #[inline(always)]
    fn default() -> Self {
        Self(1)
    }
}

/// A [`Combine`] strategy simply hands out fresh default-constructed
/// instances of its two component types, one per role.
impl<R, B> Strategy for Combine<R, B>
where
    R: Relax + Default,
    B: Backoff + Default,
{
    type Relax = R;
    type Backoff = B;

    #[inline(always)]
    fn new_relax() -> Self::Relax {
        // Fresh relax state from R's Default.
        Default::default()
    }

    #[inline(always)]
    fn new_backoff() -> Self::Backoff {
        // Fresh backoff state from B's Default.
        Default::default()
    }
}
Loading
Loading