Skip to content

Commit

Permalink
sw: implement recursive mutex
Browse files Browse the repository at this point in the history
  • Loading branch information
NikLeberg committed Jan 6, 2024
1 parent 2a2301b commit 4ff452a
Show file tree
Hide file tree
Showing 2 changed files with 99 additions and 25 deletions.
75 changes: 63 additions & 12 deletions sw/include/smp.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@
* @file smp.h
* @author NikLeberg ([email protected])
* @brief Symmetric Multi Processing for the neorv32.
* @version 0.1
* @date 2023-08-09
* @version 0.2
* @date 2024-01-04
*
* @copyright Copyright (c) 2023 Niklaus Leuenberger
* @copyright Copyright (c) 2024 Niklaus Leuenberger
*
*/

Expand All @@ -16,40 +16,91 @@
#define SMP_SPINLOCK_UNLOCKED (0) //<! spinlock is unlocked
#define SMP_SPINLOCK_LOCKED (1) //<! spinlock is locked

#define SMP_MUTEX_FREE (UINT32_MAX) //<! mutex is owned by noone


/**
* @brief Spinlock data type.
*
* @note Must be initialized with SMP_SPINLOCK_INIT!
*/
typedef uint32_t smp_spinlock_t; //<! 0 = unlocked, 1 = locked

/**
 * @brief Mutex data type.
 *
 * Recursive mutex: the same HART may take it multiple times and must give
 * it back the same number of times before another HART can take it.
 *
 * @note Must be initialized with SMP_MUTEX_INIT!
 */
typedef struct smp_mutex_s {
    uint32_t owner;           //<! hart id of owning cpu, or SMP_MUTEX_FREE
    uint32_t recursion_count; //<! how often the same hart took the lock
    smp_spinlock_t lock;      //<! spinlock to protect data access
} smp_mutex_t;


/**
* @brief Get the HART id of this HART.
*
 * @return this HART's id
*/
#define smp_get_hart_id() neorv32_cpu_csr_read(CSR_MHARTID)

/**
* @brief Set the inter processor interrupt (IPI) of a HART.
*
*/
#define smp_set_ipi_for_hart(hart_id) \
neorv32_cpu_store_unsigned_word(0xf0000000 + (4 * hart_id), 1)

/**
* @brief Reset the inter processor interrupt (IPI) of a HART.
*
*/
#define smp_reset_ipi_for_hart(hart_id) \
neorv32_cpu_store_unsigned_word(0xf0000000 + (4 * hart_id), 0)

/**
* @brief Default initialization of an unlocked spinlock.
*
*/
/* A spinlock is now a plain uint32_t, so the initializer is just the
 * unlocked value (the old struct designated initializer no longer applies). */
#define SMP_SPINLOCK_INIT SMP_SPINLOCK_UNLOCKED

/**
 * @brief Acquire the lock; spins until the lock is acquired.
 *
 * The lock is not recursive and tracks no owner: taking it again on the
 * same HART spins forever. Use smp_mutex_t for recursive locking.
 *
 * @param lock lock to acquire
*/
void smp_spinlock_lock(smp_spinlock_t *lock);

/**
 * @brief Release the currently held spinlock.
 *
 * The lock is not recursive and tracks no owner; the caller is responsible
 * for releasing only a lock it actually holds.
 *
 * @param lock lock to release
*/
void smp_spinlock_unlock(smp_spinlock_t *lock);

/**
* @brief Default initialization of an unlocked mutex.
*
*/
#define SMP_MUTEX_INIT \
{ .owner = UINT32_MAX, .recursion_count = 0, .lock = SMP_SPINLOCK_INIT }

/**
* @brief Take the mutex once.
*
* Mutex is allowed to be taken recursively by the same HART.
*
* @param mutex mutex to take
*/
void smp_mutex_take(smp_mutex_t *mutex);

/**
* @brief Give a currently held mutex back once.
*
* Mutex is allowed to be given recursively by the same HART.
*
* @param mutex mutex to release
*/
void smp_mutex_give(smp_mutex_t *mutex);
49 changes: 36 additions & 13 deletions sw/src/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,31 +17,54 @@
#include <neorv32_cpu_amo.h>


#define get_hart_id() neorv32_cpu_csr_read(CSR_MHARTID)

void smp_spinlock_lock(smp_spinlock_t *lock) {
    // If amoswap returns SMP_SPINLOCK_LOCKED, the lock was already set, and
    // we must continue to loop. If it returns SMP_SPINLOCK_UNLOCKED, then
    // the lock was free, and we have now acquired it.
    while (neorv32_cpu_amoswapw((uint32_t)lock, SMP_SPINLOCK_LOCKED) == SMP_SPINLOCK_LOCKED) {
        // We potentially have no dcache and if we have one, then certainly no
        // coherency between the caches. Relax the bus utilization with a nop.
        asm volatile("nop");
    }
}

void smp_spinlock_unlock(smp_spinlock_t *lock) {
    // Assuming no dcache the following would work:
    // *lock = SMP_SPINLOCK_UNLOCKED;
    // But we potentially have a dcache and no coherency (so far).
    // We reuse the store-conditional instruction as it is always uncached.
    // NOTE(review): on some RISC-V implementations an SC without a preceding
    // LR to the same address fails and performs no store -- confirm neorv32
    // guarantees the store actually happens here.
    neorv32_cpu_store_conditional_word((uint32_t)lock, SMP_SPINLOCK_UNLOCKED);
}

void smp_mutex_take(smp_mutex_t *mutex) {
    // Check if we currently own this mutex; if we do, we can simply increment
    // the recursion counter. But if owned by another HART, then we have to give
    // back the spinlock, wait a bit, lock it again and again check the owner in
    // the hope that the previous owner gave it back.
    smp_spinlock_lock(&mutex->lock);
    uint32_t hart_id = smp_get_hart_id();
    if (mutex->owner == hart_id) {
        mutex->recursion_count++;
    } else {
        while (mutex->owner != SMP_MUTEX_FREE) {
            smp_spinlock_unlock(&mutex->lock);
            // Relax the bus while the other HART still holds the mutex,
            // same as in smp_spinlock_lock.
            asm volatile("nop");
            smp_spinlock_lock(&mutex->lock);
        }
        mutex->owner = hart_id;
        // FIX: start the recursion count at 1 (not 0) for the first take, so
        // the count equals the number of outstanding takes. With a count of 0
        // after the first take, the sequence take-take-give reached a count of
        // 0 in smp_mutex_give and freed the mutex one give too early.
        mutex->recursion_count = 1;
    }
    smp_spinlock_unlock(&mutex->lock);
}

void smp_mutex_give(smp_mutex_t *mutex) {
    // Release one level of the recursive mutex. A give by a HART that does
    // not own the mutex is silently ignored.
    smp_spinlock_lock(&mutex->lock);
    if (mutex->owner == smp_get_hart_id()) {
        if (mutex->recursion_count > 0) {
            mutex->recursion_count--;
        }
        // Once the count reaches zero the mutex is handed back to the pool.
        // NOTE(review): if smp_mutex_take initializes recursion_count to 0 on
        // the first take, a take-take-give sequence reaches 0 here and frees
        // the mutex one give too early -- verify the take/give count pairing.
        if (mutex->recursion_count == 0) {
            mutex->owner = SMP_MUTEX_FREE;
        }
    }
    smp_spinlock_unlock(&mutex->lock);
}

0 comments on commit 4ff452a

Please sign in to comment.