locking: define lock types that use TPR for reentrancy protection
Using TPR for reentrancy protection means that higher priority
interrupts can continue to be delivered while locks are held.  Each
TPR-protected lock defines the TPR with which it is associated, so any
attempt to acquire the lock from a higher priority interrupt will panic
due to TPR inversion.

Signed-off-by: Jon Lange <[email protected]>
msft-jlange committed Dec 17, 2024
1 parent a5d13a9 commit 3a47166
Showing 5 changed files with 122 additions and 34 deletions.
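
As context for the diff below, a minimal usage sketch of the new SpinLockTpr type in kernel-crate context (the helper function and the counter it protects are illustrative, not part of this commit):

    use crate::locking::SpinLockTpr;

    fn count_event(counter: &SpinLockTpr<u64>) {
        // Acquiring the lock raises TPR to TPR_LOCK; interrupts with a higher
        // priority than TPR_LOCK can still be delivered while the guard is held.
        let mut guard = counter.lock();
        *guard += 1;
        // When `guard` goes out of scope, its TprGuard is dropped and TPR is
        // lowered again as required.
    }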
58 changes: 41 additions & 17 deletions kernel/src/locking/common.rs
@@ -3,53 +3,77 @@
// Copyright (c) 2024 SUSE LLC
//
// Author: Joerg Roedel <[email protected]>
use crate::cpu::IrqGuard;
use crate::cpu::{IrqGuard, TprGuard};
use core::marker::PhantomData;

/// Abstracts IRQ state handling when taking and releasing locks. There are two
/// implemenations:
/// Abstracts TPR and interrupt state handling when taking and releasing
/// locks. There are three implementations:
///
/// * [IrqUnsafeLocking] implements the methods as no-ops and does not change
/// any IRQ state.
/// * [IrqSafeLocking] actually disables and enables IRQs in the methods,
/// making a lock IRQ-safe by using this structure.
/// any IRQ or TPR state.
/// * [IrqGuardLocking] actually disables and enables IRQs in the methods,
/// ensuring that no interrupt can be taken while the lock is held.
/// * [TprGuardLocking] raises and lowers TPR while the lock is held,
/// ensuring that no higher priority interrupt can be taken while the lock
/// is held. This will panic when attempting to acquire a lower priority
/// lock from a higher priority interrupt context.
pub trait IrqLocking {
/// Associated helper function to disable IRQs and create an instance of
/// the implementing struct. This is used by lock implementations.
/// Associated helper function to modify TPR/interrupt state when a lock
/// is acquired. This is used by lock implementations and will return an
/// instance of the object.
///
/// # Returns
///
/// New instance of implementing struct.
fn irqs_disable() -> Self;
fn acquire_lock() -> Self;
}

/// Implements the IRQ state handling methods as no-ops. For use it IRQ-unsafe
/// locks.
/// Implements the IRQ state handling methods as no-ops. Locks defined with
/// this state handler are not safe with respect to reentrancy due to
/// interrupt delivery.
#[derive(Debug, Default)]
pub struct IrqUnsafeLocking;

impl IrqLocking for IrqUnsafeLocking {
fn irqs_disable() -> Self {
fn acquire_lock() -> Self {
Self {}
}
}

/// Properly implements the IRQ state handling methods. For use it IRQ-safe
/// locks.
/// Implements the state handling methods for locks that disable interrupts.
#[derive(Debug, Default)]
pub struct IrqSafeLocking {
pub struct IrqGuardLocking {
/// IrqGuard to keep track of IRQ state. IrqGuard implements Drop, which
/// will re-enable IRQs when the struct goes out of scope.
_guard: IrqGuard,
/// Make type explicitly !Send + !Sync
phantom: PhantomData<*const ()>,
}

impl IrqLocking for IrqSafeLocking {
fn irqs_disable() -> Self {
impl IrqLocking for IrqGuardLocking {
fn acquire_lock() -> Self {
Self {
_guard: IrqGuard::new(),
phantom: PhantomData,
}
}
}

/// Implements the state handling methods for locks that raise and lower TPR.
#[derive(Debug, Default)]
pub struct TprGuardLocking<const TPR: usize> {
/// TprGuard to keep track of TPR state. TprGuard implements Drop, which
/// will lower TPR as required when the struct goes out of scope.
_guard: TprGuard,
/// Make type explicitly !Send + !Sync
phantom: PhantomData<*const ()>,
}

impl<const TPR: usize> IrqLocking for TprGuardLocking<TPR> {
fn acquire_lock() -> Self {
Self {
_guard: TprGuard::raise(TPR),
phantom: PhantomData,
}
}
}
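
To make the TprGuardLocking behavior above concrete, here is a standalone sketch that models the guard with a thread-local stand-in for the per-CPU TPR. MockTprGuard and MOCK_TPR are inventions for this example; the real TprGuard is only assumed to raise TPR on acquire, panic on TPR inversion, and lower TPR as required on drop, as described in the comments above.

    use std::cell::Cell;

    thread_local! {
        // Stand-in for the per-CPU task priority register.
        static MOCK_TPR: Cell<usize> = Cell::new(0);
    }

    struct MockTprGuard {
        previous: usize,
    }

    impl MockTprGuard {
        fn raise(tpr: usize) -> Self {
            let previous = MOCK_TPR.with(|t| t.get());
            // TPR inversion: a lock associated with a lower priority must not
            // be acquired from a context already running at a higher priority.
            assert!(previous <= tpr, "TPR inversion: {} > {}", previous, tpr);
            MOCK_TPR.with(|t| t.set(tpr));
            Self { previous }
        }
    }

    impl Drop for MockTprGuard {
        fn drop(&mut self) {
            // Lower TPR back to whatever was in effect before the raise.
            MOCK_TPR.with(|t| t.set(self.previous));
        }
    }

    fn main() {
        let guard = MockTprGuard::raise(2);
        assert_eq!(MOCK_TPR.with(|t| t.get()), 2);
        drop(guard);
        assert_eq!(MOCK_TPR.with(|t| t.get()), 0);
    }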
11 changes: 7 additions & 4 deletions kernel/src/locking/mod.rs
@@ -8,9 +8,12 @@ pub mod common;
pub mod rwlock;
pub mod spinlock;

pub use common::{IrqLocking, IrqSafeLocking, IrqUnsafeLocking};
pub use common::{IrqGuardLocking, IrqLocking, TprGuardLocking};
pub use rwlock::{
RWLock, RWLockIrqSafe, ReadLockGuard, ReadLockGuardIrqSafe, WriteLockGuard,
WriteLockGuardIrqSafe,
RWLock, RWLockAnyTpr, RWLockIrqSafe, RWLockTpr, ReadLockGuard, ReadLockGuardAnyTpr,
ReadLockGuardIrqSafe, WriteLockGuard, WriteLockGuardAnyTpr, WriteLockGuardIrqSafe,
};
pub use spinlock::{
LockGuard, LockGuardAnyTpr, LockGuardIrqSafe, RawLockGuard, SpinLock, SpinLockAnyTpr,
SpinLockIrqSafe, SpinLockTpr,
};
pub use spinlock::{LockGuard, LockGuardIrqSafe, RawLockGuard, SpinLock, SpinLockIrqSafe};
48 changes: 42 additions & 6 deletions kernel/src/locking/rwlock.rs
@@ -5,6 +5,7 @@
// Author: Joerg Roedel <[email protected]>

use super::common::*;
use crate::types::TPR_LOCK;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
@@ -41,7 +42,9 @@ impl<T, I> Deref for RawReadLockGuard<'_, T, I> {
}

pub type ReadLockGuard<'a, T> = RawReadLockGuard<'a, T, IrqUnsafeLocking>;
pub type ReadLockGuardIrqSafe<'a, T> = RawReadLockGuard<'a, T, IrqSafeLocking>;
pub type ReadLockGuardIrqSafe<'a, T> = RawReadLockGuard<'a, T, IrqGuardLocking>;
pub type ReadLockGuardAnyTpr<'a, T, const TPR: usize> =
RawReadLockGuard<'a, T, TprGuardLocking<TPR>>;

/// A guard that provides exclusive write access to the data protected by [`RWLock`]
#[derive(Debug)]
@@ -81,7 +84,9 @@ impl<T, I> DerefMut for RawWriteLockGuard<'_, T, I> {
}

pub type WriteLockGuard<'a, T> = RawWriteLockGuard<'a, T, IrqUnsafeLocking>;
pub type WriteLockGuardIrqSafe<'a, T> = RawWriteLockGuard<'a, T, IrqSafeLocking>;
pub type WriteLockGuardIrqSafe<'a, T> = RawWriteLockGuard<'a, T, IrqGuardLocking>;
pub type WriteLockGuardAnyTpr<'a, T, const TPR: usize> =
RawWriteLockGuard<'a, T, TprGuardLocking<TPR>>;

/// A simple Read-Write Lock (RWLock) that allows multiple readers or
/// one exclusive writer.
@@ -216,7 +221,7 @@ impl<T, I: IrqLocking> RawRWLock<T, I> {
///
/// A [`ReadLockGuard`] that provides read access to the protected data.
pub fn lock_read(&self) -> RawReadLockGuard<'_, T, I> {
let irq_state = I::irqs_disable();
let irq_state = I::acquire_lock();
loop {
let val = self.wait_for_writers();
let (readers, _) = split_val(val);
@@ -246,7 +251,7 @@ impl<T, I: IrqLocking> RawRWLock<T, I> {
///
/// A [`WriteLockGuard`] that provides write access to the protected data.
pub fn lock_write(&self) -> RawWriteLockGuard<'_, T, I> {
let irq_state = I::irqs_disable();
let irq_state = I::acquire_lock();

// Waiting for current writer to finish
loop {
@@ -277,7 +282,9 @@ impl<T, I: IrqLocking> RawRWLock<T, I> {
}

pub type RWLock<T> = RawRWLock<T, IrqUnsafeLocking>;
pub type RWLockIrqSafe<T> = RawRWLock<T, IrqSafeLocking>;
pub type RWLockIrqSafe<T> = RawRWLock<T, IrqGuardLocking>;
pub type RWLockAnyTpr<T, const TPR: usize> = RawRWLock<T, TprGuardLocking<TPR>>;
pub type RWLockTpr<T> = RWLockAnyTpr<T, { TPR_LOCK }>;

mod tests {
#[test]
@@ -380,7 +387,7 @@ mod tests {

// Lock for read
let guard = lock.lock_read();
// IRQs must still be enabled;
// IRQs must be disabled
assert!(irqs_disabled());
// Unlock
drop(guard);
@@ -391,4 +398,33 @@
raw_irqs_disable();
}
}

#[test]
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn rw_lock_tpr() {
use crate::cpu::irq_state::raw_get_tpr;
use crate::locking::*;
use crate::types::TPR_LOCK;

assert_eq!(raw_get_tpr(), 0);
let lock = RWLockTpr::new(0);

// Lock for write
let guard = lock.lock_write();
// TPR must be raised
assert_eq!(raw_get_tpr(), TPR_LOCK);
// Unlock
drop(guard);
// TPR must be restored
assert_eq!(raw_get_tpr(), 0);

// Lock for read
let guard = lock.lock_read();
// TPR must be raised
assert_eq!(raw_get_tpr(), TPR_LOCK);
// Unlock
drop(guard);
// TPR must be restored
assert_eq!(raw_get_tpr(), 0);
}
}
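
A hedged usage sketch for the new reader/writer aliases, mirroring the test above (kernel-crate context; the function and the array it protects are illustrative only):

    use crate::locking::RWLockTpr;

    fn rwlock_tpr_example() {
        let counters = RWLockTpr::new([0u64; 4]);

        {
            // Writer: TPR is raised to TPR_LOCK for the lifetime of the guard.
            let mut writer = counters.lock_write();
            writer[0] += 1;
        } // Guard dropped: TPR is lowered again.

        // Reader: TPR is raised as well, so an interrupt at or below TPR_LOCK
        // cannot be delivered and re-enter this lock while it is held.
        let reader = counters.lock_read();
        let _sum: u64 = reader.iter().sum();
    }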
36 changes: 29 additions & 7 deletions kernel/src/locking/spinlock.rs
@@ -5,6 +5,7 @@
// Author: Joerg Roedel <[email protected]>

use super::common::*;
use crate::types::TPR_LOCK;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
@@ -29,7 +30,7 @@ use core::sync::atomic::{AtomicU64, Ordering};
/// ```
#[derive(Debug)]
#[must_use = "if unused the SpinLock will immediately unlock"]
pub struct RawLockGuard<'a, T, I = IrqUnsafeLocking> {
pub struct RawLockGuard<'a, T, I> {
holder: &'a AtomicU64,
data: &'a mut T,
#[expect(dead_code)]
@@ -64,7 +65,8 @@ impl<T, I> DerefMut for RawLockGuard<'_, T, I> {
}

pub type LockGuard<'a, T> = RawLockGuard<'a, T, IrqUnsafeLocking>;
pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqSafeLocking>;
pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqGuardLocking>;
pub type LockGuardAnyTpr<'a, T, const TPR: usize> = RawLockGuard<'a, T, TprGuardLocking<TPR>>;

/// A simple ticket-spinlock implementation for protecting concurrent data
/// access.
@@ -95,7 +97,7 @@ pub type LockGuardIrqSafe<'a, T> = RawLockGuard<'a, T, IrqSafeLocking>;
/// };
/// ```
#[derive(Debug, Default)]
pub struct RawSpinLock<T, I = IrqUnsafeLocking> {
pub struct RawSpinLock<T, I> {
/// This atomic counter is incremented each time a thread attempts to
/// acquire the lock. It helps to determine the order in which threads
/// acquire the lock.
@@ -150,7 +152,7 @@ impl<T, I: IrqLocking> RawSpinLock<T, I> {
/// }; // Lock is automatically released when `guard` goes out of scope.
/// ```
pub fn lock(&self) -> RawLockGuard<'_, T, I> {
let irq_state = I::irqs_disable();
let irq_state = I::acquire_lock();

let ticket = self.current.fetch_add(1, Ordering::Relaxed);
loop {
@@ -172,7 +174,7 @@ impl<T, I: IrqLocking> RawSpinLock<T, I> {
/// successfully acquired, it returns a [`LockGuard`] that automatically
/// releases the lock when it goes out of scope.
pub fn try_lock(&self) -> Option<RawLockGuard<'_, T, I>> {
let irq_state = I::irqs_disable();
let irq_state = I::acquire_lock();

let current = self.current.load(Ordering::Relaxed);
let holder = self.holder.load(Ordering::Acquire);
@@ -198,13 +200,16 @@ impl<T, I: IrqLocking> RawSpinLock<T, I> {
}

pub type SpinLock<T> = RawSpinLock<T, IrqUnsafeLocking>;
pub type SpinLockIrqSafe<T> = RawSpinLock<T, IrqSafeLocking>;
pub type SpinLockIrqSafe<T> = RawSpinLock<T, IrqGuardLocking>;
pub type SpinLockAnyTpr<T, const TPR: usize> = RawSpinLock<T, TprGuardLocking<TPR>>;
pub type SpinLockTpr<T> = SpinLockAnyTpr<T, { TPR_LOCK }>;

#[cfg(test)]
mod tests {
use super::*;
use crate::cpu::irq_state::{raw_irqs_disable, raw_irqs_enable};
use crate::cpu::irq_state::{raw_get_tpr, raw_irqs_disable, raw_irqs_enable};
use crate::cpu::{irqs_disabled, irqs_enabled};
use crate::types::TPR_LOCK;

#[test]
fn test_spin_lock() {
@@ -277,4 +282,21 @@ mod tests {
raw_irqs_disable();
}
}

#[test]
#[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")]
fn spin_trylock_tpr() {
assert_eq!(raw_get_tpr(), 0);

let spin_lock = SpinLockTpr::new(0);

// TPR is zero - taking the lock must succeed and raise TPR.
let g1 = spin_lock.try_lock();
assert!(g1.is_some());
assert_eq!(raw_get_tpr(), TPR_LOCK);

// Release lock and check if that resets TPR.
drop(g1);
assert_eq!(raw_get_tpr(), 0);
}
}
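
The const generic parameter also allows locks bound to levels other than TPR_LOCK. A sketch under that assumption (the level 3 and the helper name are hypothetical, chosen only for illustration):

    use crate::locking::SpinLockAnyTpr;

    const TPR_EXAMPLE: usize = 3; // hypothetical priority level for this sketch

    fn try_bump(value: &SpinLockAnyTpr<u64, { TPR_EXAMPLE }>) -> bool {
        // try_lock() raises TPR before attempting the acquire; if the lock is
        // contended it returns None, and dropping the guard state acquired up
        // front should restore the previous TPR before the call returns.
        match value.try_lock() {
            Some(mut guard) => {
                *guard += 1;
                true
            } // guard dropped here: TPR lowered again
            None => false,
        }
    }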
3 changes: 3 additions & 0 deletions kernel/src/types.rs
@@ -93,3 +93,6 @@ impl TryFrom<usize> for Bytes {
}
}
}

pub const TPR_NORMAL: usize = 0;
pub const TPR_LOCK: usize = 2;
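
For reference, the default aliases introduced above expand roughly as follows (informal illustration):

    // SpinLockTpr<T> == SpinLockAnyTpr<T, { TPR_LOCK }> == RawSpinLock<T, TprGuardLocking<2>>
    // RWLockTpr<T>   == RWLockAnyTpr<T, { TPR_LOCK }>   == RawRWLock<T, TprGuardLocking<2>>
    // i.e. every acquire calls TprGuard::raise(2) and every guard drop lowers TPR as required.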
