Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
eb8c2a7
Fix granular common unit test
chinglee-iot Jul 15, 2025
178e0ea
Merge branch 'feature/smp_granular_locks_v4' into dev/granular_lock_u…
chinglee-iot Aug 15, 2025
02f3f29
Fix for ISO C 90 compatible
chinglee-iot Aug 15, 2025
d11abdb
Update for xTaskRemoveFromEventList
chinglee-iot Aug 15, 2025
3e7b240
fix(freertos-smp): Fixed Lightweight Critical Sections for deferred s…
sudeep-mohanty Aug 5, 2025
2ac2c14
fix(freertos-smp): Stop unconditional yielding in vTaskPreemptionEnable
sudeep-mohanty Aug 2, 2025
e9310aa
fix(freertos-smp): Fix yielding decisions based on preemption state o…
sudeep-mohanty Aug 5, 2025
cc09d63
fix(freertos-smp): Miscellaneous fixes for granular locks
sudeep-mohanty Aug 15, 2025
ef14aaa
Merge remote-tracking branch 'sudeep/feature/smp_granular_locks_v4' i…
chinglee-iot Aug 19, 2025
0f27d66
Update granular lock implementation after merge
chinglee-iot Aug 20, 2025
ad45772
Update merge result
chinglee-iot Aug 20, 2025
bd5db02
Fix and refine queue granular lock implementation
chinglee-iot Aug 20, 2025
1d730b7
Queue logic refine
chinglee-iot Aug 20, 2025
0198b6d
Fix timers.c format
chinglee-iot Aug 20, 2025
13b8609
Update xTaskRemoveFromEventListFromISR implementation
chinglee-iot Aug 21, 2025
ae0b225
Merge remote-tracking branch 'origin/dev/granular_lock_unit_test_kern…
chinglee-iot Aug 22, 2025
0e452c0
Update the logic to yield preemption disabled task
chinglee-iot Aug 22, 2025
5217331
Update the logic to yield preemption disabled task
chinglee-iot Aug 22, 2025
84a60b3
Merge remote-tracking branch 'origin/dev/granular_lock_unit_test_kern…
chinglee-iot Aug 22, 2025
9035057
Remove unused macros
chinglee-iot Aug 22, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 15 additions & 69 deletions event_groups.c
Original file line number Diff line number Diff line change
Expand Up @@ -87,24 +87,6 @@
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/*
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
* after locking the event group, thus allowing the caller to execute non-deterministic
* operations.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables
* preemption for the caller.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
Expand All @@ -129,11 +111,19 @@
* When the task unlocks the event group, all pended access attempts are handled.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits )
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits );
#define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) )
#define event_groupsUNLOCK( pxEventBits ) taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) )
#define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \
do { \
taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( &( ( pxEventBits )->xTaskSpinlock ), pxxAlreadyYielded ); \
} while( 0 )
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
#define event_groupsUNLOCK( pxEventBits ) do{ ( void ) xTaskResumeAll(); } while( 0 )
#define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \
do { \
*( pxxAlreadyYielded ) = xTaskResumeAll(); \
} while( 0 )
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

/*-----------------------------------------------------------*/
Expand Down Expand Up @@ -316,7 +306,7 @@
}
}
}
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded );

if( xTicksToWait != ( TickType_t ) 0 )
{
Expand Down Expand Up @@ -472,7 +462,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded );

if( xTicksToWait != ( TickType_t ) 0 )
{
Expand Down Expand Up @@ -640,7 +630,6 @@
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
Expand Down Expand Up @@ -721,7 +710,7 @@
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK( pxEventBits );

traceRETURN_xEventGroupSetBits( uxReturnBits );

Expand All @@ -745,7 +734,6 @@
traceEVENT_GROUP_DELETE( xEventGroup );

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )

/* We are about to access the kernel data group non-deterministically,
* thus we suspend the kernel data group. */
vTaskSuspendAll();
Expand All @@ -763,7 +751,7 @@
( void ) xTaskResumeAll();
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
}
( void ) event_groupsUNLOCK( pxEventBits );
event_groupsUNLOCK( pxEventBits );

#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
Expand Down Expand Up @@ -867,48 +855,6 @@
traceRETURN_vEventGroupClearBitsCallback();
}
/*-----------------------------------------------------------*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
{
/* Disable preemption so that the current task cannot be preempted by another task */
vTaskPreemptionDisable( NULL );

/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
* the event group while it is suspended. */
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/

#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
{
BaseType_t xReturn = pdFALSE;

/* Release the previously held task spinlock */
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );

/* Re-enable preemption */
vTaskPreemptionEnable( NULL );

/* Yield if preemption was re-enabled*/
if( xTaskUnlockCanYield() == pdTRUE )
{
taskYIELD_WITHIN_API();

/* Return true as the task was preempted */
xReturn = pdTRUE;
}
else
{
/* Return false as the task was not preempted */
xReturn = pdFALSE;
}

return xReturn;
}
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
/*-----------------------------------------------------------*/

static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
const EventBits_t uxBitsToWaitFor,
Expand Down
8 changes: 2 additions & 6 deletions include/FreeRTOS.h
Original file line number Diff line number Diff line change
Expand Up @@ -2958,10 +2958,6 @@
#error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP FreeRTOS
#endif

#ifndef configLIGHTWEIGHT_CRITICAL_SECTION
#define configLIGHTWEIGHT_CRITICAL_SECTION 0
#endif

#ifndef configINITIAL_TICK_COUNT
#define configINITIAL_TICK_COUNT 0
#endif
Expand All @@ -2972,8 +2968,8 @@
* portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
* the tick count is returned to the standard critical section macros. */
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
#define portTICK_TYPE_ENTER_CRITICAL() kernelENTER_CRITICAL()
#define portTICK_TYPE_EXIT_CRITICAL() kernelEXIT_CRITICAL()
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
#define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL()
#define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL()
Expand Down
135 changes: 103 additions & 32 deletions include/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -299,11 +299,11 @@ typedef enum
{ \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Task spinlock is always taken first */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Disable interrupts */ \
portDISABLE_INTERRUPTS(); \
/* Take the ISR spinlock next */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \
Expand All @@ -322,11 +322,13 @@ typedef enum
#define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \
do { \
*( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Take the ISR spinlock */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
{ \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
/* Take the ISR spinlock */ \
portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Increment the critical nesting count */ \
portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
} \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

Expand All @@ -339,27 +341,27 @@ typedef enum
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
do { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
/* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
/* Release the task spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \
/* Decrement the critical nesting count */ \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
/* Enable interrupts only if the critical nesting count is 0 */ \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \
portENABLE_INTERRUPTS(); \
} \
else \
{ \
mtCOVERAGE_TEST_MARKER(); \
} \
/* Re-enable preemption */ \
vTaskPreemptionEnable( NULL ); \
#define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
do { \
const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
/* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
/* Release the task spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Decrement the critical nesting count */ \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
/* Enable interrupts only if the critical nesting count is 0 */ \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \
portENABLE_INTERRUPTS(); \
} \
else \
{ \
mtCOVERAGE_TEST_MARKER(); \
} \
/* Re-enable preemption */ \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

Expand All @@ -379,14 +381,82 @@ typedef enum
/* Decrement the critical nesting count */ \
portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
/* Release the ISR spinlock */ \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
{ \
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
} \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/**
* task. h
*
* Macros to lock a data group (task-level lock only).
*
* \defgroup taskDATA_GROUP_LOCK taskDATA_GROUP_LOCK
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \
do { \
/* Disable preemption while holding the task spinlock. */ \
vTaskPreemptionDisable( NULL ); \
portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/**
* task. h
*
* Macros to unlock a data group (task-level lock only).
*
* \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \
do { \
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Re-enable preemption after releasing the task spinlock. */ \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/**
* task. h
*
* Macros to unlock a data group (task-level lock only).
*
* \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \
do { \
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Re-enable preemption after releasing the task spinlock. */ \
vTaskPreemptionEnable( NULL ); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/**
* task. h
*
* Macros to unlock a data group and return the task yield status (task-level lock only).
*
* \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS
* \ingroup GranularLocks
*/
#if ( portUSING_GRANULAR_LOCKS == 1 )
#define taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( pxTaskSpinlock, pxTaskAlreadyYielded ) \
do { \
portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
/* Re-enable preemption after releasing the task spinlock. */ \
*( pxTaskAlreadyYielded ) = xCurrentTaskPreemptionEnable(); \
} while( 0 )
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

/*-----------------------------------------------------------
* TASK CREATION API
*----------------------------------------------------------*/
Expand Down Expand Up @@ -3681,6 +3751,7 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* making the call, otherwise pdFALSE.
*/
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
const TickType_t xItemValue ) PRIVILEGED_FUNCTION;

Expand Down Expand Up @@ -3876,13 +3947,13 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
#endif

/*
* Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
* To be called while data group is locked.
* Enable preemption of the current task and return whether the task has already yielded.
*/
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
BaseType_t xTaskUnlockCanYield( void );
BaseType_t xCurrentTaskPreemptionEnable( void );
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */


#if ( portUSING_MPU_WRAPPERS == 1 )

/*
Expand Down
Loading
Loading