From eb8c2a7138aede84a63da4b09a3c0e2ad7943768 Mon Sep 17 00:00:00 2001
From: "Ching-Hsin,Lee"
Date: Tue, 15 Jul 2025 17:10:34 +0800
Subject: [PATCH 01/16] Fix granular common unit test

---
 include/task.h |  3 ++-
 tasks.c        | 11 +++++++++--
 timers.c       | 13 +++++++++----
 3 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/include/task.h b/include/task.h
index b3040c64375..87884c4cc52 100644
--- a/include/task.h
+++ b/include/task.h
@@ -321,8 +321,9 @@ typedef enum
 #if ( portUSING_GRANULAR_LOCKS == 1 )
     #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \
         do { \
+            BaseType_t xCoreID; \
             *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+            xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
             /* Take the ISR spinlock */ \
             portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
             /* Increment the critical nesting count */ \
diff --git a/tasks.c b/tasks.c
index 80ad29497f2..d8c56ce396f 100644
--- a/tasks.c
+++ b/tasks.c
@@ -4941,10 +4941,14 @@ BaseType_t xTaskIncrementTick( void )
     TickType_t xItemValue;
     BaseType_t xSwitchRequired = pdFALSE;
 
+    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+        UBaseType_t uxSavedInterruptStatus;
+    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
     traceENTER_xTaskIncrementTick();
 
     #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-        UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
+        uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
     #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
     /* Called by the portable layer each time a tick interrupt occurs.
@@ -5678,6 +5682,10 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
     TCB_t * pxUnblockedTCB;
     BaseType_t xReturn;
 
+    #if ( ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
+        UBaseType_t uxSavedInterruptStatus;
+    #endif
+
     traceENTER_xTaskRemoveFromEventList( pxEventList );
 
     #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
@@ -5684,9 +5692,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
 
         /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
          * called from a critical section within an ISR. */
     #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
         /* Lock the kernel data group as we are about to access its members */
-        UBaseType_t uxSavedInterruptStatus;
 
         if( portCHECK_IF_IN_ISR() == pdTRUE )
         {
diff --git a/timers.c b/timers.c
index 6408b651647..96a02323e3e 100644
--- a/timers.c
+++ b/timers.c
@@ -83,8 +83,8 @@
 * Macros to mark the start and end of a critical code region.
 */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define tmrENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xTaskSpinlock, &xISRSpinlock )
-    #define tmrEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xTaskSpinlock, &xISRSpinlock )
+    #define tmrENTER_CRITICAL()    taskDATA_GROUP_ENTER_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock )
+    #define tmrEXIT_CRITICAL()     taskDATA_GROUP_EXIT_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock )
 #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
     #define tmrENTER_CRITICAL()    taskENTER_CRITICAL()
     #define tmrEXIT_CRITICAL()     taskEXIT_CRITICAL()
@@ -161,8 +161,13 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
 
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC;
-    PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC;
+    #ifdef portREMOVE_STATIC_QUALIFIER
+        PRIVILEGED_DATA portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
+        PRIVILEGED_DATA portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
+    #else
+        PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC;
+        PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC;
+    #endif
 #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
 
 /*-----------------------------------------------------------*/
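Note on the macro change above: ISO C90 only permits block-scope declarations before the first statement, which is why xCoreID is now declared at the top of the do/while block instead of being defined at the point of first use. The enter/exit pair is used as follows from an interrupt handler. This is a minimal usage sketch; xExampleISRSpinlock and vExampleISR are illustrative names, not kernel symbols:

    /* A data group's ISR spinlock, statically initialised. */
    static portSPINLOCK_TYPE xExampleISRSpinlock = portINIT_SPINLOCK_STATIC;

    void vExampleISR( void )
    {
        UBaseType_t uxSavedInterruptStatus;

        /* Masks interrupts, takes the ISR spinlock and increments this
         * core's critical nesting count. */
        taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &xExampleISRSpinlock, &uxSavedInterruptStatus );
        {
            /* Access the data group's members here. */
        }
        /* Decrements the nesting count, releases the spinlock and restores
         * the saved interrupt mask. */
        taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &xExampleISRSpinlock );
    }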
From 02f3f29c3de6efefc312e4e14bbf9dbc8a10676f Mon Sep 17 00:00:00 2001
From: "Ching-Hsin,Lee"
Date: Fri, 15 Aug 2025 11:24:29 +0800
Subject: [PATCH 02/16] Fix for ISO C90 compatibility

---
 include/task.h |  20 +++++++++-
 tasks.c        | 104 ++++++++++++++++++++++++++++++------------------
 timers.c       |  13 +++++--
 3 files changed, 92 insertions(+), 45 deletions(-)

diff --git a/include/task.h b/include/task.h
index 9dbc8db89e0..a7d2ba24021 100644
--- a/include/task.h
+++ b/include/task.h
@@ -321,8 +321,9 @@ typedef enum
 #if ( portUSING_GRANULAR_LOCKS == 1 )
     #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \
         do { \
+            BaseType_t xCoreID; \
             *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
-            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+            xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
             /* Take the ISR spinlock */ \
             portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
             /* Increment the critical nesting count */ \
@@ -387,6 +388,23 @@ typedef enum
     } while( 0 )
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \
+    do { \
+        vTaskPreemptionDisable( NULL ); \
+        portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \
+    do { \
+        portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
+        vTaskPreemptionEnable( NULL ); \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+
 /*-----------------------------------------------------------
  * TASK CREATION API
  *----------------------------------------------------------*/
diff --git a/tasks.c b/tasks.c
index 269eb0a490c..489fb33dddb 100644
--- a/tasks.c
+++ b/tasks.c
@@ -3739,6 +3739,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         TCB_t * const pxTCB = xTaskToResume;
         UBaseType_t uxSavedInterruptStatus;
 
+        #if (
configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xTaskResumed = pdFALSE; + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + traceENTER_xTaskResumeFromISR( xTaskToResume ); configASSERT( xTaskToResume ); @@ -3766,58 +3770,79 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* coverity[misra_c_2012_directive_4_7_violation] */ uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { - if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) { - traceTASK_RESUME_FROM_ISR( pxTCB ); + /* If the task being resumed is in a deferred suspension state, + * we simply clear the deferred suspension state and return. */ + if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) + { + pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; + xTaskResumed = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ - /* Check the ready lists can be accessed. */ - if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xTaskResumed == pdFALSE ) + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { - #if ( configNUMBER_OF_CORES == 1 ) + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { - /* Ready lists can be accessed so move the task from the - * suspended list to the ready list directly. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) { - xYieldRequired = pdTRUE; + /* Ready lists can be accessed so move the task from the + * suspended list to the ready list directly. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; - /* Mark that a yield is pending in case the user is not - * using the return value to initiate a context switch - * from the ISR using the port specific portYIELD_FROM_ISR(). */ - xYieldPendings[ 0 ] = pdTRUE; + /* Mark that a yield is pending in case the user is not + * using the return value to initiate a context switch + * from the ISR using the port specific portYIELD_FROM_ISR(). */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + * is held in the pending ready list until the scheduler is + * unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) + { + prvYieldForTask( pxTCB ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); + xYieldRequired = pdTRUE; } } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ - - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ } else { - /* The delayed or ready lists cannot be accessed so the task - * is held in the pending ready list until the scheduler is - * unsuspended. 
*/ - vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); - } - - #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) - { - prvYieldForTask( pxTCB ); - - if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) - { - xYieldRequired = pdTRUE; - } + mtCOVERAGE_TEST_MARKER(); } - #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ - } - else - { - mtCOVERAGE_TEST_MARKER(); } } kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); @@ -5042,7 +5067,7 @@ BaseType_t xTaskIncrementTick( void ) traceENTER_xTaskIncrementTick(); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Called by the portable layer each time a tick interrupt occurs. @@ -5788,7 +5813,6 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) * called from a critical section within an ISR. */ #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* Lock the kernel data group as we are about to access its members */ - UBaseType_t uxSavedInterruptStatus; if( portCHECK_IF_IN_ISR() == pdTRUE ) { diff --git a/timers.c b/timers.c index 6408b651647..96a02323e3e 100644 --- a/timers.c +++ b/timers.c @@ -83,8 +83,8 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) - #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) + #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) + #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define tmrENTER_CRITICAL() taskENTER_CRITICAL() #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() @@ -161,8 +161,13 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; + #ifdef portREMOVE_STATIC_QUALIFIER + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #else + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #endif #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ From d11abdb9043cf54a0751862c95642497723acd23 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Fri, 15 Aug 2025 17:24:41 +0800 Subject: [PATCH 03/16] Update for xTaskRemoveFromEventList --- include/task.h | 3 ++ queue.c | 83 ++++++++++++++++++++++++++++++++++++++-- tasks.c | 100 ++++++++++++++++++++++++++----------------------- 3 files changed, 137 insertions(+), 49 deletions(-) diff --git a/include/task.h b/include/task.h index a7d2ba24021..3ca3d7c3302 100644 --- a/include/task.h +++ b/include/task.h @@ -3699,6 +3699,9 @@ void vTaskPlaceOnEventListRestricted( List_t * 
const pxEventList,
  * making the call, otherwise pdFALSE.
  */
 BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
+#endif
 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                         const TickType_t xItemValue ) PRIVILEGED_FUNCTION;
 
diff --git a/queue.c b/queue.c
index 21426d448b8..f4df9f77470 100644
--- a/queue.c
+++ b/queue.c
@@ -222,6 +222,11 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
  * the queue set that the queue contains data. */
     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
+    #if ( portUSING_GRANULAR_LOCKS == 1 )
+        static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue );
+    #else
+        #define prvNotifyQueueSetContainerFromISR    prvNotifyQueueSetContainer
+    #endif
 #endif
 
 /*
@@ -1319,7 +1324,11 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                     {
                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                         {
-                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                            #if ( portUSING_GRANULAR_LOCKS == 1 )
+                                if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                            #else
+                                if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                            #endif
                             {
                                 /* The task waiting has a higher priority so
                                  * record that a context switch is required. */
@@ -1493,7 +1502,11 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                 {
                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
                     {
-                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                        #if ( portUSING_GRANULAR_LOCKS == 1 )
+                            if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                        #else
+                            if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+                        #endif
                         {
                             /* The task waiting has a higher priority so
                              * record that a context switch is required. */
@@ -2113,7 +2126,11 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             {
                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
                 {
-                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
+                    #if ( portUSING_GRANULAR_LOCKS == 1 )
+                        if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
+                    #else
+                        if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
+                    #endif
                     {
                         /* The task waiting has a higher priority than us so
                          * force a context switch. */
@@ -3409,4 +3426,64 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         return xReturn;
     }
 
+    #if ( portUSING_GRANULAR_LOCKS == 1 )
+        static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue )
+        {
+            Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
+            BaseType_t xReturn = pdFALSE;
+
+            /* This function must be called from a critical section.
+             */
+
+            /* The following line is not reachable in unit tests because every call
+             * to prvNotifyQueueSetContainer is preceded by a check that
+             * pxQueueSetContainer != NULL */
+            configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
+            configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
+
+            if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
+            {
+                const int8_t cTxLock = pxQueueSetContainer->cTxLock;
+
+                traceQUEUE_SET_SEND( pxQueueSetContainer );
+
+                /* The data copied is the handle of the queue that contains data. */
+                xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
+
+                if( cTxLock == queueUNLOCKED )
+                {
+                    if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
+                    {
+                        #if ( portUSING_GRANULAR_LOCKS == 1 )
+                            if( xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
+                        #else
+                            if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
+                        #endif
+                        {
+                            /* The task waiting has a higher priority. */
+                            xReturn = pdTRUE;
+                        }
+                        else
+                        {
+                            mtCOVERAGE_TEST_MARKER();
+                        }
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+                }
+                else
+                {
+                    prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
+                }
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            return xReturn;
+        }
+    #endif
+
 #endif /* configUSE_QUEUE_SETS */
diff --git a/tasks.c b/tasks.c
index 489fb33dddb..1196e65d706 100644
--- a/tasks.c
+++ b/tasks.c
@@ -5796,41 +5796,13 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 #endif /* configUSE_TIMERS */
 /*-----------------------------------------------------------*/
 
-BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
+static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList )
 {
     TCB_t * pxUnblockedTCB;
     BaseType_t xReturn;
 
-    #if ( ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
-        UBaseType_t uxSavedInterruptStatus;
-    #endif
-
     traceENTER_xTaskRemoveFromEventList( pxEventList );
 
-    #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
-
-        /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
-         * called from a critical section within an ISR. */
-    #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
-        /* Lock the kernel data group as we are about to access its members */
-
-        if( portCHECK_IF_IN_ISR() == pdTRUE )
-        {
-            uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
-        }
-        else
-        {
-            uxSavedInterruptStatus = 0;
-            kernelENTER_CRITICAL();
-        }
-
-        /* Before taking the kernel lock, another task/ISR could have already
-         * emptied the pxEventList. So we insert a check here to see if
-         * pxEventList is empty before attempting to remove an item from it. */
-        if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
-        {
-    #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
-
     /* The event list is sorted in priority order, so the first in the list can
      * be removed as it is known to be the highest priority. Remove the TCB from
      * the delayed list, and add it to the ready list.
@@ -5909,31 +5881,67 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
         }
     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
 
-    #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-}
-else
-{
-    /* The pxEventList was emptied before we entered the critical
-     * section, Nothing to do except return pdFALSE. */
-    xReturn = pdFALSE;
-}
-
-/* We are done accessing the kernel data group. Unlock it. */
-if( portCHECK_IF_IN_ISR() == pdTRUE )
-{
-    kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
-}
-else
+    traceRETURN_xTaskRemoveFromEventList( xReturn );
+
+    return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
 {
-    kernelEXIT_CRITICAL();
-}
-    #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+    BaseType_t xReturn;
+
+    #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) )
+    {
+        /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
+         * called from a critical section within an ISR. */
+        xReturn = prvTaskRemoveFromEventList( pxEventList );
+    }
+    #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
+    {
+        kernelENTER_CRITICAL();
+        {
+            /* Lock the kernel data group as we are about to access its members */
+            if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
+            {
+                xReturn = prvTaskRemoveFromEventList( pxEventList );
+            }
+            else
+            {
+                xReturn = pdFALSE;
+            }
+        }
+        kernelEXIT_CRITICAL();
+    }
+    #endif
 
-    traceRETURN_xTaskRemoveFromEventList( xReturn );
     return xReturn;
 }
 /*-----------------------------------------------------------*/
 
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList )
+    {
+        BaseType_t xReturn;
+
+        UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
+        {
+            /* Lock the kernel data group as we are about to access its members */
+            if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
+            {
+                xReturn = prvTaskRemoveFromEventList( pxEventList );
+            }
+            else
+            {
+                xReturn = pdFALSE;
+            }
+        }
+        kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+
+        return xReturn;
+    }
+#endif
+/*-----------------------------------------------------------*/
 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                         const TickType_t xItemValue )
 {
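Note on patch 03: the split above separates the locking policy from the unblocking work. prvTaskRemoveFromEventList() does the actual list surgery; xTaskRemoveFromEventList() keeps the historical contract (caller is already in a critical section, or the function takes the kernel lock itself under granular locks); xTaskRemoveFromEventListFromISR() does the same from interrupt context. The listLIST_IS_EMPTY() re-check inside both wrappers matters because, with granular locks, another core can drain pxEventList between the caller's own check and acquisition of the kernel spinlock. A condensed sketch of the resulting ISR call-site pattern, as in the queue.c hunks above (xHigherPriorityTaskWoken follows the usual FromISR convention):

    if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
    {
        /* Enters the kernel critical section internally and re-checks the
         * list before unblocking the highest priority waiting task. */
        if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
        {
            /* The unblocked task outranks the interrupted one, so request a
             * context switch when the ISR completes. */
            xHigherPriorityTaskWoken = pdTRUE;
        }
    }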
From 3e7b2402cdf2578eb59bf503deb19eb8b8b59acd Mon Sep 17 00:00:00 2001
From: Sudeep Mohanty
Date: Tue, 5 Aug 2025 13:30:23 +0200
Subject: [PATCH 04/16] fix(freertos-smp): Fix lightweight critical sections for deferred state change

---
 tasks.c | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/tasks.c b/tasks.c
index 96fc80075a7..6f256977b66 100644
--- a/tasks.c
+++ b/tasks.c
@@ -7850,12 +7850,7 @@ static void prvResetNextTaskUnblockTime( void )
 
             portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
 
-            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U
-                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-                    /* Check for the run state change of the task only if a deferred state change is not pending */
-                    && pxCurrentTCB->uxDeferredStateChange == 0U
-                #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-                )
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U )
             {
                 prvLightWeightCheckForRunStateChange();
             }
@@ -7890,12 +7885,7 @@ static void prvResetNextTaskUnblockTime( void )
         {
             portENABLE_INTERRUPTS();
 
-            if( xYieldCurrentTask != pdFALSE
-                #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-                    /* Yield only if no deferred state change is pending */
-                    && pxCurrentTCB->uxDeferredStateChange == 0U
-                #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
-                )
+            if( xYieldCurrentTask != pdFALSE )
             {
                 portYIELD();
             }
@@ -7917,6 +7907,7 @@ static void prvResetNextTaskUnblockTime( void )
         if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE )
             #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
                 && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
+                && ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U )
             #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
             )
         {
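Note on patches 04-06: they all defend the same invariant. While a task runs with preemption disabled, nothing may yield it off its core; a suspend or yield requested in that window is only recorded (uxDeferredStateChange, xYieldPendings[ xCoreID ]) and is acted on once preemption is re-enabled. The task-side pattern, sketched under the assumption that granular locking is configured (the worker body is illustrative):

    vTaskPreemptionDisable( NULL );   /* NULL means the calling task. */
    {
        /* Long, non-deterministic work. If a higher priority task becomes
         * ready now, the scheduler merely sets xYieldPendings[ xCoreID ]. */
    }
    vTaskPreemptionEnable( NULL );    /* Any pending yield happens here. */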
From 2ac2c147162e4a061b715994230887301e08f729 Mon Sep 17 00:00:00 2001
From: Sudeep Mohanty
Date: Sat, 2 Aug 2025 10:48:15 +0200
Subject: [PATCH 05/16] fix(freertos-smp): Stop unconditional yielding in vTaskPreemptionEnable

---
 tasks.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tasks.c b/tasks.c
index 6f256977b66..9dedc7226ed 100644
--- a/tasks.c
+++ b/tasks.c
@@ -3334,7 +3334,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         }
         else
         {
-            if( ( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) )
+            if( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
             {
                 prvYieldCore( pxTCB->xTaskRunState );
             }

From e9310aa1153ba84a9e9ef870c95ba07e7d8d0b84 Mon Sep 17 00:00:00 2001
From: Sudeep Mohanty
Date: Tue, 5 Aug 2025 13:31:35 +0200
Subject: [PATCH 06/16] fix(freertos-smp): Fix yielding decisions based on preemption state of task

---
 tasks.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/tasks.c b/tasks.c
index 9dedc7226ed..4379a63aa7d 100644
--- a/tasks.c
+++ b/tasks.c
@@ -1085,12 +1085,23 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
     #endif
     {
         #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+        {
             if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
-        #endif
+            {
+                xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
+                xLowestPriorityCore = xCoreID;
+            }
+            else
+            {
+                xYieldPendings[ xCoreID ] = pdTRUE;
+            }
+        }
+        #else
         {
             xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
             xLowestPriorityCore = xCoreID;
         }
+        #endif
     }
 }
 else
@@ -1391,12 +1402,23 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
                              ( xYieldPendings[ uxCore ] == pdFALSE ) )
                         {
                             #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                            {
                                 if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U )
-                            #endif
+                                {
+                                    xLowestPriority = xTaskPriority;
+                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
+                                }
+                                else
+                                {
+                                    xYieldPendings[ uxCore ] = pdTRUE;
+                                }
+                            }
+                            #else
                             {
                                 xLowestPriority = xTaskPriority;
                                 xLowestPriorityCore = ( BaseType_t ) uxCore;
                             }
+                            #endif
                         }
                     }
                 }
@@ -3053,12 +3075,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                         /* Setting the priority of a running task down means
                          * there may now be another task of higher priority that
                          * is ready to execute.
*/ - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxTCB->uxPreemptionDisable == 0U ) - #endif - { - xYieldRequired = pdTRUE; - } + xYieldRequired = pdTRUE; } else { From cc09d6336c94c59b8a20919263849721407785de Mon Sep 17 00:00:00 2001 From: Sudeep Mohanty Date: Fri, 15 Aug 2025 17:03:39 +0200 Subject: [PATCH 07/16] fix(freertos-smp): Miscellaneous fixes for granular locks --- event_groups.c | 78 +++-------- include/FreeRTOS.h | 4 +- include/task.h | 98 +++++++++----- queue.c | 34 +++-- stream_buffer.c | 59 ++------- tasks.c | 318 ++++++++++++++++++++------------------------- timers.c | 48 +++++-- 7 files changed, 291 insertions(+), 348 deletions(-) diff --git a/event_groups.c b/event_groups.c index 66ddfb18f3d..8685ffbe3fb 100644 --- a/event_groups.c +++ b/event_groups.c @@ -87,24 +87,6 @@ #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ -/* - * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows - * ISRs to pend access to the event group. Caller cannot be preempted by other tasks - * after locking the event group, thus allowing the caller to execute non-deterministic - * operations. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - -/* - * Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables - * preemption for the caller. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* * Test the bits set in uxCurrentEventBits to see if the wait condition is met. * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is @@ -129,8 +111,22 @@ * When the task unlocks the event group, all pended access attempts are handled. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits ) - #define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits ); + #define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) ) + #define event_groupsUNLOCK( pxEventBits ) \ + ( { \ + taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ); \ + BaseType_t xAlreadyYielded; \ + if( xTaskUnlockCanYield() == pdTRUE ) \ + { \ + taskYIELD_WITHIN_API(); \ + xAlreadyYielded = pdTRUE; \ + } \ + else \ + { \ + xAlreadyYielded = pdFALSE; \ + } \ + xAlreadyYielded; \ + } ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define event_groupsLOCK( pxEventBits ) vTaskSuspendAll() #define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll() @@ -867,48 +863,6 @@ traceRETURN_vEventGroupClearBitsCallback(); } /*-----------------------------------------------------------*/ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) - { - /* Disable preemption so that the current task cannot be preempted by another task */ - vTaskPreemptionDisable( NULL ); - - /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing - * the event group while it is suspended. */ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) - { - BaseType_t xReturn = pdFALSE; - - /* Release the previously held task spinlock */ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); - - /* Re-enable preemption */ - vTaskPreemptionEnable( NULL ); - - /* Yield if preemption was re-enabled*/ - if( xTaskUnlockCanYield() == pdTRUE ) - { - taskYIELD_WITHIN_API(); - - /* Return true as the task was preempted */ - xReturn = pdTRUE; - } - else - { - /* Return false as the task was not preempted */ - xReturn = pdFALSE; - } - - return xReturn; - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 83caf37dea5..5ea6bd79f87 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -2972,8 +2972,8 @@ * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when * the tick count is returned to the standard critical section macros. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) - #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) + #define portTICK_TYPE_ENTER_CRITICAL() kernelENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() kernelEXIT_CRITICAL() #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() diff --git a/include/task.h b/include/task.h index 9dbc8db89e0..8bdb80c199f 100644 --- a/include/task.h +++ b/include/task.h @@ -299,11 +299,11 @@ typedef enum { \ const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ /* Task spinlock is always taken first */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ /* Disable interrupts */ \ portDISABLE_INTERRUPTS(); \ /* Take the ISR spinlock next */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ /* Increment the critical nesting count */ \ portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ } \ @@ -322,11 +322,13 @@ typedef enum #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \ do { \ *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - /* Take the ISR spinlock */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ - /* Increment the critical nesting count */ \ - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + { \ + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + /* Take the ISR spinlock */ \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ + /* Increment the critical nesting count */ \ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + } \ } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ @@ -339,27 +341,27 @@ typedef enum * \ingroup GranularLocks */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \ - do { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ - /* Release the ISR spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ - /* Release the task spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ - /* Decrement the critical nesting count */ \ - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - /* Enable interrupts only if the critical nesting count is 0 */ \ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ - { \ - portENABLE_INTERRUPTS(); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ - /* Re-enable preemption */ \ - vTaskPreemptionEnable( NULL ); \ + #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \ + do { \ + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ + /* Release the ISR spinlock */ \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ + /* Release the task spinlock */ \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( 
pxTaskSpinlock ) ); \ + /* Decrement the critical nesting count */ \ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + /* Enable interrupts only if the critical nesting count is 0 */ \ + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ + { \ + portENABLE_INTERRUPTS(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ + /* Re-enable preemption */ \ + vTaskPreemptionEnable( NULL ); \ } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ @@ -379,7 +381,7 @@ typedef enum /* Decrement the critical nesting count */ \ portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ /* Release the ISR spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ { \ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ @@ -387,6 +389,44 @@ typedef enum } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +/** + * task. h + * + * Macros to lock a data group (task-level lock only). + * + * \defgroup taskDATA_GROUP_LOCK taskDATA_GROUP_LOCK + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \ + do { \ + /* Disable preemption while holding the task spinlock. */ \ + vTaskPreemptionDisable( NULL ); \ + { \ + portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + } \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/** + * task. h + * + * Macros to unlock a data group (task-level lock only). + * + * \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \ + do { \ + { \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + } \ + /* Re-enable preemption after releasing the task spinlock. */ \ + vTaskPreemptionEnable( NULL ); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + /*----------------------------------------------------------- * TASK CREATION API *----------------------------------------------------------*/ diff --git a/queue.c b/queue.c index 21426d448b8..4b08327f384 100644 --- a/queue.c +++ b/queue.c @@ -328,25 +328,23 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * When the tasks unlocks the queue, all pended access attempts are handled. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define queueLOCK( pxQueue ) \ - do { \ - vTaskPreemptionDisable( NULL ); \ - prvLockQueue( ( pxQueue ) ); \ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \ + #define queueLOCK( pxQueue ) \ + do { \ + taskDATA_GROUP_LOCK( &( ( pxQueue )->xTaskSpinlock ) ); \ + prvLockQueue( ( pxQueue ) ); \ } while( 0 ) - #define queueUNLOCK( pxQueue, xYieldAPI ) \ - do { \ - prvUnlockQueue( ( pxQueue ) ); \ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \ - vTaskPreemptionEnable( NULL ); \ - if( ( xYieldAPI ) == pdTRUE ) \ - { \ - taskYIELD_WITHIN_API(); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ + #define queueUNLOCK( pxQueue, xYieldAPI ) \ + do { \ + prvUnlockQueue( ( pxQueue ) ); \ + taskDATA_GROUP_UNLOCK( &( ( pxQueue )->xTaskSpinlock ) ); \ + if( ( xYieldAPI ) == pdTRUE ) \ + { \ + taskYIELD_WITHIN_API(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ } while( 0 ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define queueLOCK( pxQueue ) \ diff --git a/stream_buffer.c b/stream_buffer.c index 8dd11b8eb8a..fe52427c6be 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -63,10 +63,10 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) - #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxStreamBuffer->xISRSpinlock, puxSavedInterruptStatus ) - #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) - #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxStreamBuffer->xISRSpinlock ) + #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) ) + #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &( ( pxStreamBuffer )->xISRSpinlock ), puxSavedInterruptStatus ) + #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) ) + #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &( ( pxStreamBuffer )->xISRSpinlock ) ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define sbENTER_CRITICAL( pxStreamBuffer ) taskENTER_CRITICAL(); #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) @@ -84,8 +84,8 @@ * When the task unlocks the stream buffer, all pended access attempts are handled. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define sbLOCK( pxStreamBuffer ) prvLockStreamBufferForTasks( pxStreamBuffer ) - #define sbUNLOCK( pxStreamBuffer ) prvUnlockStreamBufferForTasks( pxStreamBuffer ) + #define sbLOCK( pxStreamBuffer ) taskDATA_GROUP_LOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) ) + #define sbUNLOCK( pxStreamBuffer ) taskDATA_GROUP_UNLOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define sbLOCK( pxStreamBuffer ) vTaskSuspendAll() #define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll() @@ -109,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) sbUNLOCK( pxStreamBuffer ); \ + sbUNLOCK( pxStreamBuffer ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED */ @@ -189,7 +189,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) sbUNLOCK( pxStreamBuffer ) + sbUNLOCK( pxStreamBuffer ) #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -288,24 +288,6 @@ typedef struct StreamBufferDef_t #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StreamBuffer_t; -/* - * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer - * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted - * by other tasks after locking the stream buffer, thus allowing the caller to - * execute non-deterministic operations. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - -/* - * Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption - * for the caller. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* * The number of bytes available to be read from the buffer. */ @@ -381,31 +363,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) - { - /* Disable preemption so that the current task cannot be preempted by another task */ - vTaskPreemptionDisable( NULL ); - - /* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks - * on other cores from accessing the stream buffer while it is suspended. 
*/ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) - { - /* Release the previously held task spinlock */ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); - - /* Re-enable preemption */ - vTaskPreemptionEnable( NULL ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) diff --git a/tasks.c b/tasks.c index 4379a63aa7d..a093d37bd88 100644 --- a/tasks.c +++ b/tasks.c @@ -630,6 +630,7 @@ static BaseType_t prvCreateIdleTasks( void ); #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) + /* * Checks to see if another task moved the current task out of the ready * list while it was waiting to enter a lightweight critical section and yields, if so. @@ -971,7 +972,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) static void prvLightWeightCheckForRunStateChange( void ) { - const TCB_t * pxThisTCB; BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); @@ -1024,8 +1024,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; kernelGET_ISR_LOCK( xCoreID ); portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); - }; -} + } + } #endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ /*-----------------------------------------------------------*/ @@ -1096,12 +1096,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xYieldPendings[ xCoreID ] = pdTRUE; } } - #else + #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { xLowestPriorityToPreempt = xCurrentCoreTaskPriority; xLowestPriorityCore = xCoreID; } - #endif + #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ } } else @@ -1413,12 +1413,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xYieldPendings[ uxCore ] = pdTRUE; } } - #else + #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { xLowestPriority = xTaskPriority; xLowestPriorityCore = ( BaseType_t ) uxCore; } - #endif + #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ } } } @@ -2840,7 +2840,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. 
*/ @@ -2849,7 +2857,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2918,7 +2934,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. */ @@ -2927,7 +2951,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -3262,14 +3294,30 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3351,7 +3399,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - if( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) ) + if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) ) { prvYieldCore( pxTCB->xTaskRunState ); } @@ -4443,11 +4491,7 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPendings[ xCoreID ] != pdFALSE - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) - #endif - ) + if( xYieldPendings[ xCoreID ] != pdFALSE ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -5052,10 +5096,14 @@ BaseType_t xTaskIncrementTick( void ) TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceENTER_xTaskIncrementTick(); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Called by the portable layer each time a tick interrupt occurs. 
@@ -5191,32 +5239,14 @@ BaseType_t xTaskIncrementTick( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCB->uxPreemptionDisable != 0U ) - { - mtCOVERAGE_TEST_MARKER(); - } - else - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { @@ -5789,6 +5819,10 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) TCB_t * pxUnblockedTCB; BaseType_t xReturn; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceENTER_xTaskRemoveFromEventList( pxEventList ); #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) @@ -5797,8 +5831,6 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) * called from a critical section within an ISR. */ #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ /* Lock the kernel data group as we are about to access its members */ - UBaseType_t uxSavedInterruptStatus; - if( portCHECK_IF_IN_ISR() == pdTRUE ) { uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); @@ -7098,9 +7130,7 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityInherit( pxMutexHolder ); - #if ( portUSING_GRANULAR_LOCKS == 1 ) - kernelENTER_CRITICAL(); - #endif + kernelENTER_CRITICAL(); { /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority * inheritance is not applied in this scenario. */ @@ -7188,9 +7218,7 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - #if ( portUSING_GRANULAR_LOCKS == 1 ) - kernelEXIT_CRITICAL(); - #endif + kernelEXIT_CRITICAL(); traceRETURN_xTaskPriorityInherit( xReturn ); @@ -7499,7 +7527,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskEnterCritical( void ) { @@ -7511,60 +7539,24 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + /* When using granular locks, the critical section nesting count + * might have already been incremented if this call is a nested + * call from a data group critical section. Hence, we have to + * acquire the kernel task and ISR locks unconditionally. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) { kernelGET_TASK_LOCK( xCoreID ); kernelGET_ISR_LOCK( xCoreID ); } - - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - /* This is not the interrupt safe version of the enter critical - * function so assert() if it is being called from an interrupt - * context. 
Only API functions that end in "FromISR" can be used in an - * interrupt. Only assert if the critical nesting count is 1 to - * protect against recursive calls if the assert function also uses a - * critical section. */ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) + #else /* portUSING_GRANULAR_LOCKS */ { - portASSERT_IF_IN_ISR(); - - if( uxSchedulerSuspended == 0U ) + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - /* The only time there would be a problem is if this is called - * before a context switch and vTaskExitCritical() is called - * after pxCurrentTCB changes. Therefore this should not be - * used within vTaskSwitchContext(). */ - prvCheckForRunStateChange(); + kernelGET_TASK_LOCK( xCoreID ); + kernelGET_ISR_LOCK( xCoreID ); } } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - - traceRETURN_vTaskEnterCritical(); - } - -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) - - void vTaskEnterCritical( void ) - { - traceENTER_vTaskEnterCritical(); - - portDISABLE_INTERRUPTS(); - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - if( xSchedulerRunning != pdFALSE ) - { - kernelGET_TASK_LOCK( xCoreID ); - kernelGET_ISR_LOCK( xCoreID ); + #endif /* portUSING_GRANULAR_LOCKS */ portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7597,7 +7589,7 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskEnterCritical(); } -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -7678,7 +7670,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCritical( void ) { @@ -7698,97 +7690,67 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) { - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #if ( portUSING_GRANULAR_LOCKS == 1 ) { BaseType_t xYieldCurrentTask; /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ xCoreID ]; + xYieldCurrentTask = xTaskUnlockCanYield(); + /* Release the ISR and task locks first when using granular locks. */ kernelRELEASE_ISR_LOCK( xCoreID ); kernelRELEASE_TASK_LOCK( xCoreID ); - portENABLE_INTERRUPTS(); + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - portYIELD(); + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. 
*/ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); } } - else + #else /* portUSING_GRANULAR_LOCKS */ { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - traceRETURN_vTaskExitCritical(); - } - -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) - - void vTaskExitCritical( void ) - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - traceENTER_vTaskExitCritical(); - - if( xSchedulerRunning != pdFALSE ) - { - /* If critical nesting count is zero then this function - * does not match a previous call to vTaskEnterCritical(). */ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); - - /* This function should not be called in ISR. Use vTaskExitCriticalFromISR - * to exit critical section from ISR. */ - portASSERT_IF_IN_ISR(); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) - { - /* Release the ISR and task locks */ - kernelRELEASE_ISR_LOCK( xCoreID ); - kernelRELEASE_TASK_LOCK( xCoreID ); - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + /* Decrement first; release locks and enable interrupts when count reaches zero. */ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - /* If the critical nesting count is 0, enable interrupts */ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - BaseType_t xYieldCurrentTask; + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + { + BaseType_t xYieldCurrentTask; - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xTaskUnlockCanYield(); + /* Get the xYieldPending stats inside the critical section. */ + xYieldCurrentTask = xYieldPendings[ xCoreID ]; - portENABLE_INTERRUPTS(); + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); + portENABLE_INTERRUPTS(); - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. 
*/ + if( xYieldCurrentTask != pdFALSE ) + { + portYIELD(); + } + } + else { - portYIELD(); + mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #endif /* portUSING_GRANULAR_LOCKS */ } else { @@ -7803,7 +7765,7 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskExitCritical(); } -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -7923,8 +7885,8 @@ static void prvResetNextTaskUnblockTime( void ) if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) - && ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) + && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) && + ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ ) { diff --git a/timers.c b/timers.c index 6408b651647..ed5d70982a0 100644 --- a/timers.c +++ b/timers.c @@ -83,8 +83,8 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) - #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) + #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) + #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define tmrENTER_CRITICAL() taskENTER_CRITICAL() #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() @@ -161,8 +161,8 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -613,7 +613,15 @@ traceENTER_xTimerGetReloadMode( xTimer ); configASSERT( xTimer ); - tmrENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U ) { @@ -626,7 +634,15 @@ xReturn = pdTRUE; } } - tmrEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_xTimerGetReloadMode( xReturn ); @@ -1188,7 +1204,15 @@ configASSERT( xTimer ); /* Is the timer in the list of active timers? 
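For orientation, the conditional just introduced reduces to the following selection; the tmrLOCK()/tmrUNLOCK() wrappers below are hypothetical shorthand for this sketch only, not macros added by the patch:

#if ( configNUMBER_OF_CORES > 1 )
    /* SMP: tmrENTER_CRITICAL() resolves to the timer data group's own
     * spinlocks when portUSING_GRANULAR_LOCKS == 1. */
    #define tmrLOCK()      tmrENTER_CRITICAL()
    #define tmrUNLOCK()    tmrEXIT_CRITICAL()
#else
    /* Single core: a plain BaseType_t-scoped critical section suffices
     * and is cheaper than the data group machinery. */
    #define tmrLOCK()      portBASE_TYPE_ENTER_CRITICAL()
    #define tmrUNLOCK()    portBASE_TYPE_EXIT_CRITICAL()
#endif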
*/ - tmrENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U ) { @@ -1199,7 +1223,15 @@ xReturn = pdTRUE; } } - tmrEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_xTimerIsTimerActive( xReturn ); From 0f27d662dd5c0b2efb527ef4cc03f8927eea0e39 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Wed, 20 Aug 2025 15:08:07 +0800 Subject: [PATCH 08/16] Update granular lock implementation after merge --- event_groups.c | 36 ++--- include/task.h | 64 +++++--- tasks.c | 408 +++++++++++++------------------------------------ timers.c | 35 +++-- 4 files changed, 185 insertions(+), 358 deletions(-) diff --git a/event_groups.c b/event_groups.c index 8685ffbe3fb..bf9c17ec985 100644 --- a/event_groups.c +++ b/event_groups.c @@ -112,24 +112,18 @@ */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) #define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) ) - #define event_groupsUNLOCK( pxEventBits ) \ - ( { \ - taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ); \ - BaseType_t xAlreadyYielded; \ - if( xTaskUnlockCanYield() == pdTRUE ) \ - { \ - taskYIELD_WITHIN_API(); \ - xAlreadyYielded = pdTRUE; \ - } \ - else \ - { \ - xAlreadyYielded = pdFALSE; \ - } \ - xAlreadyYielded; \ - } ) + #define event_groupsUNLOCK( pxEventBits ) taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ) + #define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \ + do { \ + taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( &( ( pxEventBits )->xTaskSpinlock ), pxxAlreadyYielded ); \ + } while( 0 ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define event_groupsLOCK( pxEventBits ) vTaskSuspendAll() - #define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll() + #define event_groupsUNLOCK( pxEventBits ) do{ ( void ) xTaskResumeAll(); } while( 0 ) + #define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \ + do { \ + *( pxxAlreadyYielded ) = xTaskResumeAll(); \ + } while( 0 ) #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -312,7 +306,7 @@ } } } - xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -468,7 +462,7 @@ traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } - xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -636,7 +630,6 @@ traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are about to access the kernel data group non-deterministically, * thus we suspend the kernel data group.*/ vTaskSuspendAll(); @@ -717,7 +710,7 @@ ( void ) xTaskResumeAll(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK( pxEventBits ); traceRETURN_xEventGroupSetBits( uxReturnBits ); @@ -741,7 +734,6 @@ traceEVENT_GROUP_DELETE( xEventGroup ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( 
configNUMBER_OF_CORES > 1 ) ) - /* We are about to access the kernel data group non-deterministically, * thus we suspend the kernel data group.*/ vTaskSuspendAll(); @@ -759,7 +751,7 @@ ( void ) xTaskResumeAll(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK( pxEventBits ); #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) { diff --git a/include/task.h b/include/task.h index 4e35d920131..2569ea99fee 100644 --- a/include/task.h +++ b/include/task.h @@ -324,7 +324,7 @@ typedef enum BaseType_t xCoreID; \ *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ /* Take the ISR spinlock */ \ portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ /* Increment the critical nesting count */ \ @@ -399,13 +399,28 @@ typedef enum * \ingroup GranularLocks */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \ + #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \ + do { \ + /* Disable preemption while holding the task spinlock. */ \ + vTaskPreemptionDisable( NULL ); \ + portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/** + * task. h + * + * Macros to unlock a data group (task-level lock only). + * + * \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \ do { \ - /* Disable preemption while holding the task spinlock. */ \ - vTaskPreemptionDisable( NULL ); \ - { \ - portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ - } \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + /* Re-enable preemption after releasing the task spinlock. */ \ + vTaskPreemptionEnable( NULL ); \ } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ @@ -418,13 +433,28 @@ typedef enum * \ingroup GranularLocks */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \ - do { \ - { \ - portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ - } \ - /* Re-enable preemption after releasing the task spinlock. */ \ - vTaskPreemptionEnable( NULL ); \ + #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \ + do { \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + /* Re-enable preemption after releasing the task spinlock. */ \ + vTaskPreemptionEnable( NULL ); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/** + * task. h + * + * Macros to unlock a data group and return the task yield status. (task-level lock only). + * + * \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( pxTaskSpinlock, pxTaskAlreadyYielded ) \ + do { \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + /* Re-enable preemption after releasing the task spinlock. 
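As a usage sketch of the pairing these macros are designed for (ExampleGroup_t and vExampleWaitBits() are hypothetical; the real callers are the event group, queue and stream buffer data groups, which embed their own spinlocks):

typedef struct
{
    portSPINLOCK_TYPE xTaskSpinlock; /* Task-level guard for this data group. */
    portSPINLOCK_TYPE xISRSpinlock;  /* ISR-level guard for this data group. */
    /* ... members owned by the group ... */
} ExampleGroup_t;                    /* Hypothetical data group. */

void vExampleWaitBits( ExampleGroup_t * pxGroup )
{
    BaseType_t xAlreadyYielded;

    /* Disables preemption for the caller, then takes the group's task
     * spinlock; interrupts on this core stay enabled throughout. */
    taskDATA_GROUP_LOCK( &( pxGroup->xTaskSpinlock ) );
    {
        /* ... queue the calling task on the group's event list ... */
    }

    /* Releases the spinlock and re-enables preemption, reporting whether
     * re-enabling preemption already caused a yield. */
    taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( &( pxGroup->xTaskSpinlock ),
                                             &xAlreadyYielded );

    if( xAlreadyYielded == pdFALSE )
    {
        /* No yield happened yet, so request one before waiting,
         * mirroring the event_groups.c usage above. */
        taskYIELD_WITHIN_API();
    }
}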
*/ \
+            *( pxTaskAlreadyYielded ) = xCurrentTaskPreemptionEnable(); \
+        } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
@@ -3920,13 +3950,13 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC
 #endif
 
 /*
- * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns.
- * To be called while data group is locked.
+ * Enable preemption of the current task and return whether the task has already yielded.
 */
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    BaseType_t xTaskUnlockCanYield( void );
+    BaseType_t xCurrentTaskPreemptionEnable( void );
 #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+
 #if ( portUSING_MPU_WRAPPERS == 1 )
 
 /*
diff --git a/tasks.c b/tasks.c
index 9f5896b4a09..5f9db783c42 100644
--- a/tasks.c
+++ b/tasks.c
@@ -629,15 +629,6 @@ static BaseType_t prvCreateIdleTasks( void );
     static void prvCheckForRunStateChange( void );
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 
-#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-
-/*
- * Checks to see if another task moved the current task out of the ready
- * list while it was waiting to enter a lightweight critical section and yields, if so.
- */
-    static void prvLightWeightCheckForRunStateChange( void );
-#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
-
 #if ( configNUMBER_OF_CORES > 1 )
 
 /*
@@ -898,6 +889,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
                                       size_t n );
 #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
+
+#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+
+    static BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask );
+
+#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
 /*-----------------------------------------------------------*/
 
 #if ( configNUMBER_OF_CORES > 1 )
@@ -969,67 +966,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 
 /*-----------------------------------------------------------*/
 
-#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-    static void prvLightWeightCheckForRunStateChange( void )
-    {
-        const TCB_t * pxThisTCB;
-        BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
-
-        /* This must only be called from within a task. */
-        portASSERT_IF_IN_ISR();
-
-        /* This function is always called with interrupts disabled
-         * so this is safe. */
-        pxThisTCB = pxCurrentTCBs[ xCoreID ];
-
-        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
-        {
-            UBaseType_t uxPrevCriticalNesting;
-
-            /* We are only here if we just entered a critical section
-             * or if we just suspended the scheduler, and another task
-             * has requested that we yield.
-             *
-             * This is slightly complicated since we need to save and restore
-             * the suspension and critical nesting counts, as well as release
-             * and reacquire the correct locks. And then, do it all over again
-             * if our state changed again during the reacquisition. */
-            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
-
-            if( uxPrevCriticalNesting > 0U )
-            {
-                portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
-                kernelRELEASE_ISR_LOCK( xCoreID );
-            }
-            else
-            {
-                /* The scheduler is suspended. uxSchedulerSuspended is updated
-                 * only when the task is not requested to yield. */
-                mtCOVERAGE_TEST_MARKER();
-            }
-
-            portMEMORY_BARRIER();
-
-            portENABLE_INTERRUPTS();
-
-            /* Enabling interrupts should cause this core to immediately service
-             * the pending interrupt and yield.
After servicing the pending interrupt, - * the task needs to re-evaluate its run state within this loop, as - * other cores may have requested this task to yield, potentially altering - * its run state. */ - - portDISABLE_INTERRUPTS(); - - xCoreID = ( BaseType_t ) portGET_CORE_ID(); - kernelGET_ISR_LOCK( xCoreID ); - - portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); - } - } -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ - -/*-----------------------------------------------------------*/ - #if ( configNUMBER_OF_CORES > 1 ) static void prvYieldForTask( const TCB_t * pxTCB ) { @@ -1047,7 +983,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; configASSERT( portGET_CRITICAL_NESTING_COUNT( xCurrentCoreID ) > 0U ); #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) - /* No task should yield for this one if it is a lower priority * than priority level of currently ready tasks. */ if( pxTCB->uxPriority >= uxTopReadyPriority ) @@ -1096,7 +1031,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xYieldPendings[ xCoreID ] = pdTRUE; } } - #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { xLowestPriorityToPreempt = xCurrentCoreTaskPriority; xLowestPriorityCore = xCoreID; @@ -1413,7 +1348,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; xYieldPendings[ uxCore ] = pdTRUE; } } - #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { xLowestPriority = xTaskPriority; xLowestPriorityCore = ( BaseType_t ) uxCore; @@ -2396,7 +2331,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, configASSERT( pxTCB != NULL ); #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task has disabled preemption, we need to defer the deletion until the * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). 
*/
             if( pxTCB->uxPreemptionDisable > 0U )
             {
@@ -2406,9 +2341,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             }
             else
             {
-                /* Reset the deferred state change flags */
-                pxTCB->uxDeferredStateChange &= ~tskDEFERRED_DELETION;
+                mtCOVERAGE_TEST_MARKER();
             }
+        }
     #endif /* configUSE_TASK_PREEMPTION_DISABLE */
 
     #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
@@ -3335,11 +3270,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 
         traceENTER_vTaskPreemptionDisable( xTask );
 
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightEnterCritical();
-        #else
-            kernelENTER_CRITICAL();
-        #endif
+        kernelENTER_CRITICAL();
         {
             if( xSchedulerRunning != pdFALSE )
             {
@@ -3353,11 +3284,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightExitCritical();
-        #else
-            kernelEXIT_CRITICAL();
-        #endif
+        kernelEXIT_CRITICAL();
 
         traceRETURN_vTaskPreemptionDisable();
     }
@@ -3367,19 +3294,18 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 
 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
 
-    void vTaskPreemptionEnable( const TaskHandle_t xTask )
+    static BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask )
     {
         TCB_t * pxTCB;
-        UBaseType_t uxDeferredAction = 0U;
+        BaseType_t xTaskAlreadyYielded = pdFALSE;
+        BaseType_t xCoreID;
 
         traceENTER_vTaskPreemptionEnable( xTask );
 
-        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
-            vKernelLightWeightEnterCritical();
-        #else
-            kernelENTER_CRITICAL();
-        #endif
+        kernelENTER_CRITICAL();
         {
+            xCoreID = portGET_CORE_ID();
+
             if( xSchedulerRunning != pdFALSE )
             {
                 pxTCB = prvGetTCBFromHandle( xTask );
@@ -3394,18 +3320,36 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                  * preemption was disabled. */
                 if( pxTCB->uxDeferredStateChange != 0U )
                 {
-                    /* Capture the deferred action to perform outside critical section */
-                    uxDeferredAction = pxTCB->uxDeferredStateChange;
+                    if( pxTCB->uxDeferredStateChange & tskDEFERRED_DELETION )
+                    {
+                        vTaskDelete( xTask );
+                    }
+                    else if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION )
+                    {
+                        vTaskSuspend( xTask );
+                    }
+                    else
+                    {
+                        mtCOVERAGE_TEST_MARKER();
+                    }
+
+                    pxTCB->uxDeferredStateChange = 0U;
+                    xTaskAlreadyYielded = pdTRUE;
                 }
                 else
                 {
-                    if( ( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE ) && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) )
+                    if( pxTCB->xTaskRunState != xCoreID )
                     {
+                        /* When enabling preemption for a task running on another
+                         * core, handle that core's pending yield request here. */
                         prvYieldCore( pxTCB->xTaskRunState );
+                        xTaskAlreadyYielded = pdTRUE;
                     }
                     else
                     {
-                        mtCOVERAGE_TEST_MARKER();
+                        /* The pending yield request will be handled after leaving
+                         * the critical section.
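For context, the deferral logic above is what makes the following sequence safe: vTaskDelete() or vTaskSuspend() on a preemption-disabled task only records the request, and prvTaskPreemptionEnable() replays it. A minimal sketch (task and function names hypothetical):

void vWorkerTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        vTaskPreemptionDisable( NULL );
        {
            /* Another core may call vTaskDelete()/vTaskSuspend() on this
             * task here; the request is parked in uxDeferredStateChange
             * rather than acted on immediately. */
        }
        /* Replays any deferred delete/suspend before returning. */
        vTaskPreemptionEnable( NULL );
    }
}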
*/ + xTaskAlreadyYielded = xYieldPendings[ xCoreID ]; } } } @@ -3419,30 +3363,21 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, mtCOVERAGE_TEST_MARKER(); } } - #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - vKernelLightWeightExitCritical(); - #else - kernelEXIT_CRITICAL(); - #endif - - /* Handle deferred actions outside critical section */ - if( uxDeferredAction != 0U ) - { - if( uxDeferredAction & tskDEFERRED_DELETION ) - { - vTaskDelete( xTask ); - } - else if( uxDeferredAction & tskDEFERRED_SUSPENSION ) - { - vTaskSuspend( xTask ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionEnable(); + + return xTaskAlreadyYielded; + } + + BaseType_t xCurrentTaskPreemptionEnable( void ) + { + return prvTaskPreemptionEnable( NULL ); + } + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + ( void ) prvTaskPreemptionEnable( xTask ); } #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ @@ -3468,7 +3403,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, configASSERT( pxTCB != NULL ); #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task has disabled preemption, we need to defer the suspension until the * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */ if( pxTCB->uxPreemptionDisable > 0U ) @@ -3478,9 +3413,9 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - /* Reset the deferred state change flags */ - pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; + mtCOVERAGE_TEST_MARKER(); } + } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -3727,12 +3662,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, configASSERT( xTaskToResume ); #if ( configNUMBER_OF_CORES == 1 ) - /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. */ if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) #else - /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. It is also impossible to resume a task * that is actively running on another core but it is not safe @@ -3744,7 +3677,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, kernelENTER_CRITICAL(); { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task being resumed is in a deferred suspension state, * we simply clear the deferred suspension state and return. */ if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) @@ -3756,6 +3689,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { mtCOVERAGE_TEST_MARKER(); } + } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -5717,7 +5651,6 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* THIS FUNCTION MUST BE CALLED WITH THE * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); @@ -5760,7 +5693,6 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event groups implementation. 
*/ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); @@ -5803,7 +5735,6 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* This function should not be called by application code hence the * 'Restricted' in its name. It is not part of the public API. It is * designed for use by kernel code, and has special calling requirements - @@ -5844,35 +5775,8 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ) TCB_t * pxUnblockedTCB; BaseType_t xReturn; - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - UBaseType_t uxSavedInterruptStatus; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - traceENTER_xTaskRemoveFromEventList( pxEventList ); - #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) - - /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be - * called from a critical section within an ISR. */ - #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ - /* Lock the kernel data group as we are about to access its members */ - if( portCHECK_IF_IN_ISR() == pdTRUE ) - { - uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); - } - else - { - uxSavedInterruptStatus = 0; - kernelENTER_CRITICAL(); - } - - /* Before taking the kernel lock, another task/ISR could have already - * emptied the pxEventList. So we insert a check here to see if - * pxEventList is empty before attempting to remove an item from it. */ - if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) - { - #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ - /* The event list is sorted in priority order, so the first in the list can * be removed as it is known to be the highest priority. Remove the TCB from * the delayed list, and add it to the ready list. @@ -5982,7 +5886,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } kernelEXIT_CRITICAL(); } - #endif + #endif /* if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ return xReturn; } @@ -5994,6 +5898,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) BaseType_t xReturn; UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + { /* Lock the kernel data group as we are about to access its members */ if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) @@ -6009,7 +5914,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) return xReturn; } -#endif +#endif /* if ( portUSING_GRANULAR_LOCKS == 1 ) */ /*-----------------------------------------------------------*/ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, @@ -7604,20 +7509,13 @@ static void prvResetNextTaskUnblockTime( void ) * might have already been incremented if this call is a nested * call from a data group critical section. Hence, we have to * acquire the kernel task and ISR locks unconditionally. 
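The emptiness re-check that used to live inline here survives in the ISR-side wrapper: between an interrupt observing a waiter and the kernel ISR spinlock being taken, another core can drain the event list. A condensed sketch of that wrapper's shape under granular locks (simplified, with a hypothetical function name; kernelEXIT_CRITICAL_FROM_ISR() is assumed to be the counterpart of the enter macro used in this series):

BaseType_t xExampleRemoveFromISR( const List_t * pxEventList )
{
    BaseType_t xReturn = pdFALSE;
    UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();
    {
        /* Another task or ISR may have emptied the list before the kernel
         * ISR spinlock was taken, so re-check under the lock. */
        if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
        {
            xReturn = prvTaskRemoveFromEventList( pxEventList );
        }
    }
    kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}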
*/ - #if ( portUSING_GRANULAR_LOCKS == 1 ) + #if ( portUSING_GRANULAR_LOCKS != 1 ) + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #endif /* portUSING_GRANULAR_LOCKS */ { kernelGET_TASK_LOCK( xCoreID ); kernelGET_ISR_LOCK( xCoreID ); } - #else /* portUSING_GRANULAR_LOCKS */ - { - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - kernelGET_TASK_LOCK( xCoreID ); - kernelGET_ISR_LOCK( xCoreID ); - } - } - #endif /* portUSING_GRANULAR_LOCKS */ portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); @@ -7751,67 +7649,58 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) { - #if ( portUSING_GRANULAR_LOCKS == 1 ) - { - BaseType_t xYieldCurrentTask; - - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xTaskUnlockCanYield(); - - /* Release the ISR and task locks first when using granular locks. */ - kernelRELEASE_ISR_LOCK( xCoreID ); - kernelRELEASE_TASK_LOCK( xCoreID ); - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - portENABLE_INTERRUPTS(); + BaseType_t xYieldCurrentTask = pdFALSE; - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) - { - portYIELD(); - } - } - else + /* Get the xYieldPending stats inside the critical section. */ + if( uxSchedulerSuspended == 0U ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) && + ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) ) + #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { - mtCOVERAGE_TEST_MARKER(); + xYieldCurrentTask = xYieldPendings[ xCoreID ]; } } - #else /* portUSING_GRANULAR_LOCKS */ + else { - /* Decrement first; release locks and enable interrupts when count reaches zero. */ - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - BaseType_t xYieldCurrentTask; + mtCOVERAGE_TEST_MARKER(); + } - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ xCoreID ]; + /* Release the ISR and task locks first when using granular locks. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + { + /* Critical nesting count is used to count interrupt status. + * The spinlock is implemented recursively. */ + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); + } + #endif + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + { + #if ( portUSING_GRANULAR_LOCKS == 0 ) + { kernelRELEASE_ISR_LOCK( xCoreID ); kernelRELEASE_TASK_LOCK( xCoreID ); - portENABLE_INTERRUPTS(); - - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. */ - if( xYieldCurrentTask != pdFALSE ) - { - portYIELD(); - } } - else + #endif + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. 
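To aid review of the rework above, the unified exit path can be read as the following straight-line condensation. This restates the code just shown (with the configUSE_TASK_PREEMPTION_DISABLE checks elided); it is not additional behaviour:

/* vTaskExitCritical(), condensed reading: */
BaseType_t xYieldCurrentTask = pdFALSE;

if( uxSchedulerSuspended == 0U )
{
    xYieldCurrentTask = xYieldPendings[ xCoreID ];
}

#if ( portUSING_GRANULAR_LOCKS == 1 )
    /* The kernel spinlocks are recursive, so they are released on
     * every nesting level, not just the outermost one. */
    kernelRELEASE_ISR_LOCK( xCoreID );
    kernelRELEASE_TASK_LOCK( xCoreID );
#endif

portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );

if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
{
    #if ( portUSING_GRANULAR_LOCKS == 0 )
        /* Classic path: locks are held until the outermost exit. */
        kernelRELEASE_ISR_LOCK( xCoreID );
        kernelRELEASE_TASK_LOCK( xCoreID );
    #endif

    portENABLE_INTERRUPTS();

    if( xYieldCurrentTask != pdFALSE )
    {
        portYIELD();
    }
}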
*/ + if( xYieldCurrentTask != pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); + portYIELD(); } } - #endif /* portUSING_GRANULAR_LOCKS */ + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -7875,95 +7764,6 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - - void vKernelLightWeightEnterCritical( void ) - { - if( xSchedulerRunning != pdFALSE ) - { - portDISABLE_INTERRUPTS(); - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - /* Get only the ISR lock, not the task lock */ - kernelGET_ISR_LOCK( xCoreID ); - - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) - { - prvLightWeightCheckForRunStateChange(); - } - } - } - } - -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ -/*-----------------------------------------------------------*/ - -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - - void vKernelLightWeightExitCritical( void ) - { - if( xSchedulerRunning != pdFALSE ) - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) - { - /* Release the ISR lock */ - kernelRELEASE_ISR_LOCK( xCoreID ); - - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - BaseType_t xYieldCurrentTask; - - xYieldCurrentTask = xTaskUnlockCanYield(); - - /* If the critical nesting count is 0, enable interrupts */ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - portENABLE_INTERRUPTS(); - - if( xYieldCurrentTask != pdFALSE ) - { - portYIELD(); - } - } - } - } - } - -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ -/*-----------------------------------------------------------*/ - -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - - BaseType_t xTaskUnlockCanYield( void ) - { - BaseType_t xReturn; - BaseType_t xCoreID = portGET_CORE_ID(); - - if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) && - ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) - #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - - return xReturn; - } - -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) static char * prvWriteNameToBuffer( char * pcBuffer, diff --git a/timers.c b/timers.c index ed5d70982a0..bee98b256e5 100644 --- a/timers.c +++ b/timers.c @@ -161,8 +161,13 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #ifdef portREMOVE_STATIC_QUALIFIER + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #else + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #endif #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 
) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -1080,19 +1085,19 @@ case tmrCOMMAND_DELETE: #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - { - /* The timer has already been removed from the active list, - * just free up the memory if the memory was dynamically - * allocated. */ - if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) - { - vPortFree( pxTimer ); - } - else - { - pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_ACTIVE ); - } - } + { + /* The timer has already been removed from the active list, + * just free up the memory if the memory was dynamically + * allocated. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) + { + vPortFree( pxTimer ); + } + else + { + pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_ACTIVE ); + } + } #else /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ { /* If dynamic allocation is not enabled, the memory From ad4577283a04d160fb89be84e6cca45a88bb7d2e Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Wed, 20 Aug 2025 15:51:28 +0800 Subject: [PATCH 09/16] Update merge result --- include/task.h | 3 +-- queue.c | 4 ++-- tasks.c | 44 +++++++++++++++++++++++++++----------------- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/include/task.h b/include/task.h index 2569ea99fee..8a1a41e41eb 100644 --- a/include/task.h +++ b/include/task.h @@ -321,10 +321,9 @@ typedef enum #if ( portUSING_GRANULAR_LOCKS == 1 ) #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \ do { \ - BaseType_t xCoreID; \ *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ - xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ /* Take the ISR spinlock */ \ portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ /* Increment the critical nesting count */ \ diff --git a/queue.c b/queue.c index f2eef46ce6c..469603f12af 100644 --- a/queue.c +++ b/queue.c @@ -2125,9 +2125,9 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { #if ( portUSING_GRANULAR_LOCKS == 1 ) - if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) #else - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) #endif { /* The task waiting has a higher priority than us so diff --git a/tasks.c b/tasks.c index 5f9db783c42..975b5390196 100644 --- a/tasks.c +++ b/tasks.c @@ -2341,7 +2341,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - mtCOVERAGE_TEST_MARKER(); + /* Reset the deferred state change flags */ + pxTCB->uxDeferredStateChange &= ~tskDEFERRED_DELETION; } } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ @@ -3300,8 +3301,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, BaseType_t xTaskAlreadyYielded = pdFALSE; BaseType_t xCoreID; - traceENTER_vTaskPreemptionEnable( xTask ); - kernelENTER_CRITICAL(); { xCoreID = portGET_CORE_ID(); @@ -3338,18 +3337,25 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - if( pxTCB->xTaskRunState != xCoreID ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - /* When enable preemption of other tasks, the task is - * should handle the pending yield 
request for other tasks. */ - prvYieldCore( pxTCB->xTaskRunState ); - xTaskAlreadyYielded = pdTRUE; + if( pxTCB->xTaskRunState != xCoreID ) + { + /* When enable preemption of other tasks, the task is + * should handle the pending yield request for other tasks. */ + prvYieldCore( pxTCB->xTaskRunState ); + xTaskAlreadyYielded = pdTRUE; + } + else + { + /* The pending yield request will be handled after leaving + * the critical section. */ + xTaskAlreadyYielded = xYieldPendings[ xCoreID ]; + } } else { - /* The pending yield request will be handled after leaving - * the critical section. */ - xTaskAlreadyYielded = xYieldPendings[ xCoreID ]; + mtCOVERAGE_TEST_MARKER(); } } } @@ -3365,8 +3371,6 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } kernelEXIT_CRITICAL(); - traceRETURN_vTaskPreemptionEnable(); - return xTaskAlreadyYielded; } @@ -3377,7 +3381,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, void vTaskPreemptionEnable( const TaskHandle_t xTask ) { + traceENTER_vTaskPreemptionEnable( xTask ); + ( void ) prvTaskPreemptionEnable( xTask ); + + traceRETURN_vTaskPreemptionEnable(); } #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ @@ -3413,7 +3421,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } else { - mtCOVERAGE_TEST_MARKER(); + /* Reset the deferred state change flags */ + pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; } } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ @@ -5775,8 +5784,6 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ) TCB_t * pxUnblockedTCB; BaseType_t xReturn; - traceENTER_xTaskRemoveFromEventList( pxEventList ); - /* The event list is sorted in priority order, so the first in the list can * be removed as it is known to be the highest priority. Remove the TCB from * the delayed list, and add it to the ready list. @@ -5855,7 +5862,6 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ - traceRETURN_xTaskRemoveFromEventList( xReturn ); return xReturn; } /*-----------------------------------------------------------*/ @@ -5864,6 +5870,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { BaseType_t xReturn; + traceENTER_xTaskRemoveFromEventList( pxEventList ); + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) { /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be @@ -5888,6 +5896,8 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + traceRETURN_xTaskRemoveFromEventList( xReturn ); + return xReturn; } /*-----------------------------------------------------------*/ From bd5db0225e3e8419eb09038b19c9bc2175826ee5 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Wed, 20 Aug 2025 16:24:37 +0800 Subject: [PATCH 10/16] Fix and refine queue granular lock implementation --- queue.c | 124 +++++++++++++++++++++----------------------------------- 1 file changed, 47 insertions(+), 77 deletions(-) diff --git a/queue.c b/queue.c index 469603f12af..b7fd261b1c7 100644 --- a/queue.c +++ b/queue.c @@ -221,12 +221,10 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, * Checks to see if a queue is a member of a queue set, and if so, notifies * the queue set that the queue contains data. 
*/ - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; - #if ( portUSING_GRANULAR_LOCKS == 1 ) - static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue ); - #else - #define prvNotifyQueueSetContainerFromISR prvNotifyQueueSetContainer - #endif + static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue, + BaseType_t xNotifyFromISR ) PRIVILEGED_FUNCTION; + #define prvNotifyQueueSetContainer( pxQueue ) prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE ) + #define prvNotifyQueueSetContainerFromISR( pxQueue ) prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE ) #endif /* @@ -1299,7 +1297,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * in the queue has not changed. */ mtCOVERAGE_TEST_MARKER(); } - else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + else if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE ) { /* The queue is a member of a queue set, and posting * to the queue set caused a higher priority task to @@ -1477,7 +1475,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( pxQueue->pxQueueSetContainer != NULL ) { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE ) { /* The semaphore is a member of a queue set, and * posting to the queue set caused a higher priority @@ -3369,66 +3367,16 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /*-----------------------------------------------------------*/ #if ( configUSE_QUEUE_SETS == 1 ) - - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) + static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue, + BaseType_t xNotifyFromISR ) { Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; BaseType_t xReturn = pdFALSE; - /* This function must be called form a critical section. */ - - /* The following line is not reachable in unit tests because every call - * to prvNotifyQueueSetContainer is preceded by a check that - * pxQueueSetContainer != NULL */ - configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */ - configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); - - if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) - { - const int8_t cTxLock = pxQueueSetContainer->cTxLock; - - traceQUEUE_SET_SEND( pxQueueSetContainer ); - - /* The data copied is the handle of the queue that contains data. */ - xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); - - if( cTxLock == queueUNLOCKED ) - { - if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) - { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - return xReturn; - } - - #if ( portUSING_GRANULAR_LOCKS == 1 ) - static BaseType_t prvNotifyQueueSetContainerFromISR( const Queue_t * const pxQueue ) - { - Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; - BaseType_t xReturn = pdFALSE; + #if ( portUSING_GRANULAR_LOCKS != 1 ) + /* This function should be called in critical section. 
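Folding the task-level and ISR variants into one parameterised function is the same thin-wrapper pattern used elsewhere in this series; in miniature (names shortened and hypothetical for the sketch):

/* One implementation, parameterised on the calling context... */
static BaseType_t prvNotifyGeneric( Queue_t * pxQueue,
                                    BaseType_t xFromISR );

/* ...and zero-cost wrappers that keep every call site unchanged. */
#define prvNotify( pxQueue )           prvNotifyGeneric( ( pxQueue ), pdFALSE )
#define prvNotifyFromISR( pxQueue )    prvNotifyGeneric( ( pxQueue ), pdTRUE )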
*/ + ( void ) xNotifyFromISR; + #endif /* This function must be called form a critical section. */ @@ -3451,19 +3399,42 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) { - #if ( portUSING_GRANULAR_LOCKS == 1 ) - if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + #if ( portUSING_GRANULAR_LOCKS != 1 ) + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } #else - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - #endif - { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + if( xNotifyFromISR != pdTRUE ) + { + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + if( xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( portUSING_GRANULAR_LOCKS != 1 ) */ } else { @@ -3482,6 +3453,5 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) return xReturn; } - #endif #endif /* configUSE_QUEUE_SETS */ From 1d730b7fbed953f81c30fd71b0c87aee784d0dd1 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Wed, 20 Aug 2025 16:40:46 +0800 Subject: [PATCH 11/16] Queue logic refine --- queue.c | 93 ++++++++++++++++++++++++--------------------------------- 1 file changed, 39 insertions(+), 54 deletions(-) diff --git a/queue.c b/queue.c index b7fd261b1c7..4cd7a27a77a 100644 --- a/queue.c +++ b/queue.c @@ -95,6 +95,20 @@ typedef struct SemaphoreData #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif +#if ( portUSING_GRANULAR_LOCKS == 1 ) + + /* Kernel data and queue data are in different protection domains, each guarded + * by its own critical section. Use the kernel-specific API to access kernel data. */ + #define queueREMOVE_TASK_FROM_EVENT_LIST xTaskRemoveFromEventList + #define queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR xTaskRemoveFromEventListFromISR +#else + + /* A single critical section protects both kernel and queue data. + * The same API is used for task removal in both normal and ISR contexts. */ + #define queueREMOVE_TASK_FROM_EVENT_LIST xTaskRemoveFromEventList + #define queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR xTaskRemoveFromEventList +#endif + /* * Definition of the queue used by the scheduler. * Items are queued by copy, not reference. See the following link for the @@ -415,7 +429,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, * it will be possible to write to it. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1093,7 +1107,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * queue then unblock it now. 
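The two aliases introduced above keep every queue call site identical regardless of the locking model: under granular locks, kernel data sits in its own protection domain and ISR callers must go through the FromISR variant, while the classic build uses one API for both contexts. The same selection pattern in miniature (the example macro name is hypothetical):

#if ( portUSING_GRANULAR_LOCKS == 1 )
    /* Kernel data has its own spinlocks; ISRs take the kernel ISR
     * spinlock via the FromISR variant. */
    #define exampleWAKE_FROM_ISR( pxList )    xTaskRemoveFromEventListFromISR( pxList )
#else
    /* A single critical section covers kernel and queue data alike. */
    #define exampleWAKE_FROM_ISR( pxList )    xTaskRemoveFromEventList( pxList )
#endif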
*/ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The unblocked task has a priority higher than * our own so yield immediately. Yes it is ok to @@ -1128,7 +1142,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * queue then unblock it now. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The unblocked task has a priority higher than * our own so yield immediately. Yes it is ok to do @@ -1320,11 +1334,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - #if ( portUSING_GRANULAR_LOCKS == 1 ) - if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - #else - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - #endif + if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. */ @@ -1352,7 +1362,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -1498,11 +1508,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - #if ( portUSING_GRANULAR_LOCKS == 1 ) - if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - #else - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) - #endif + if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. */ @@ -1530,7 +1536,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -1622,7 +1628,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, * task. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1780,7 +1786,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * semaphore, and if so, unblock the highest priority such task. 
*/ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1972,7 +1978,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, * any other tasks waiting for the data. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority than this task. */ queueYIELD_IF_USING_PREEMPTION(); @@ -2122,11 +2128,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - #if ( portUSING_GRANULAR_LOCKS == 1 ) - if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - #else - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - #endif + if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { /* The task waiting has a higher priority than us so * force a context switch. */ @@ -2571,7 +2573,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) * suspended. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -2594,7 +2596,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) * the pending ready list as the scheduler is still suspended. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that * a context switch is required. */ @@ -2628,7 +2630,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { vTaskMissedYield(); } @@ -3373,11 +3375,6 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; BaseType_t xReturn = pdFALSE; - #if ( portUSING_GRANULAR_LOCKS != 1 ) - /* This function should be called in critical section. */ - ( void ) xNotifyFromISR; - #endif - /* This function must be called form a critical section. */ /* The following line is not reachable in unit tests because every call @@ -3399,8 +3396,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) { - #if ( portUSING_GRANULAR_LOCKS != 1 ) - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xNotifyFromISR != pdTRUE ) + { + if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority. 
*/ xReturn = pdTRUE; @@ -3409,32 +3407,19 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { mtCOVERAGE_TEST_MARKER(); } - #else - if( xNotifyFromISR != pdTRUE ) + } + else + { + if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; } else { - if( xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) - { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + mtCOVERAGE_TEST_MARKER(); } - #endif /* if ( portUSING_GRANULAR_LOCKS != 1 ) */ + } } else { From 0198b6dfe488c8f3bdddf198e58041f30c607fa6 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Wed, 20 Aug 2025 16:42:46 +0800 Subject: [PATCH 12/16] Fix timers.c format --- timers.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/timers.c b/timers.c index bee98b256e5..53097568ccc 100644 --- a/timers.c +++ b/timers.c @@ -1085,19 +1085,19 @@ case tmrCOMMAND_DELETE: #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - { - /* The timer has already been removed from the active list, - * just free up the memory if the memory was dynamically - * allocated. */ - if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) - { - vPortFree( pxTimer ); - } - else - { - pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_ACTIVE ); - } - } + { + /* The timer has already been removed from the active list, + * just free up the memory if the memory was dynamically + * allocated. */ + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) == ( uint8_t ) 0 ) + { + vPortFree( pxTimer ); + } + else + { + pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_ACTIVE ); + } + } #else /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ { /* If dynamic allocation is not enabled, the memory From 13b8609e5676c2fe5f03e239561eb8ee300265d7 Mon Sep 17 00:00:00 2001 From: "Ching-Hsin,Lee" Date: Thu, 21 Aug 2025 12:15:24 +0800 Subject: [PATCH 13/16] Update xTaskRemoveFromEventListFromISR implementation --- include/task.h | 4 +--- queue.c | 46 ++++++++++++++++------------------------------ tasks.c | 26 ++++++++++++++++++-------- 3 files changed, 35 insertions(+), 41 deletions(-) diff --git a/include/task.h b/include/task.h index 8a1a41e41eb..94141ad0afc 100644 --- a/include/task.h +++ b/include/task.h @@ -3751,9 +3751,7 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, * making the call, otherwise pdFALSE. 
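Application code never calls these event-list functions directly; they sit underneath the FromISR communication APIs. A typical driver-side sequence that exercises the new ISR path (handler and semaphore names hypothetical; the semaphore is assumed to be created elsewhere):

static SemaphoreHandle_t xExampleSemaphore; /* Hypothetical; created at init. */

void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Under granular locks this path reaches
     * xTaskRemoveFromEventListFromISR() when a task is blocked here. */
    ( void ) xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Request a context switch on interrupt exit if a higher
     * priority task was woken. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}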
*/ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; -#if ( portUSING_GRANULAR_LOCKS == 1 ) - BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; -#endif +BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION; diff --git a/queue.c b/queue.c index 4cd7a27a77a..0cdf61b1160 100644 --- a/queue.c +++ b/queue.c @@ -95,20 +95,6 @@ typedef struct SemaphoreData #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ #endif -#if ( portUSING_GRANULAR_LOCKS == 1 ) - - /* Kernel data and queue data are in different protection domains, each guarded - * by its own critical section. Use the kernel-specific API to access kernel data. */ - #define queueREMOVE_TASK_FROM_EVENT_LIST xTaskRemoveFromEventList - #define queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR xTaskRemoveFromEventListFromISR -#else - - /* A single critical section protects both kernel and queue data. - * The same API is used for task removal in both normal and ISR contexts. */ - #define queueREMOVE_TASK_FROM_EVENT_LIST xTaskRemoveFromEventList - #define queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR xTaskRemoveFromEventList -#endif - /* * Definition of the queue used by the scheduler. * Items are queued by copy, not reference. See the following link for the @@ -429,7 +415,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, * it will be possible to write to it. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1107,7 +1093,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * queue then unblock it now. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The unblocked task has a priority higher than * our own so yield immediately. Yes it is ok to @@ -1142,7 +1128,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * queue then unblock it now. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The unblocked task has a priority higher than * our own so yield immediately. Yes it is ok to do @@ -1334,7 +1320,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. 
*/ @@ -1362,7 +1348,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -1508,7 +1494,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. */ @@ -1536,7 +1522,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -1628,7 +1614,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, * task. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1786,7 +1772,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * semaphore, and if so, unblock the highest priority such task. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { queueYIELD_IF_USING_PREEMPTION(); } @@ -1978,7 +1964,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, * any other tasks waiting for the data. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority than this task. */ queueYIELD_IF_USING_PREEMPTION(); @@ -2128,7 +2114,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { /* The task waiting has a higher priority than us so * force a context switch. */ @@ -2573,7 +2559,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) * suspended. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that a * context switch is required. */ @@ -2596,7 +2582,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) * the pending ready list as the scheduler is still suspended. 
*/ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so record that * a context switch is required. */ @@ -2630,7 +2616,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { vTaskMissedYield(); } @@ -3398,7 +3384,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { if( xNotifyFromISR != pdTRUE ) { - if( queueREMOVE_TASK_FROM_EVENT_LIST( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority. */ xReturn = pdTRUE; @@ -3410,7 +3396,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - if( queueREMOVE_TASK_FROM_EVENT_LIST_FROM_ISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority. */ xReturn = pdTRUE; diff --git a/tasks.c b/tasks.c index 975b5390196..8adf9d1d37c 100644 --- a/tasks.c +++ b/tasks.c @@ -895,6 +895,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; static BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask ); #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + +static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ); + /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -5902,13 +5905,19 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } /*-----------------------------------------------------------*/ -#if ( portUSING_GRANULAR_LOCKS == 1 ) - BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) - { - BaseType_t xReturn; +BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) +{ + BaseType_t xReturn; + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + { + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + xReturn = prvTaskRemoveFromEventList( pxEventList ); + } + #else /* #if ( ! 
( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
+    {
         UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR();

-    {
         /* Lock the kernel data group as we are about to access its members */

         if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE )
@@ -5921,10 +5930,11 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
             }
         }
         kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
-
-        return xReturn;
     }
-#endif /* if ( portUSING_GRANULAR_LOCKS == 1 ) */
+    #endif /* if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */
+
+    return xReturn;
+}
 /*-----------------------------------------------------------*/

 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,

From 5217331dc870dc3da24768e9ff3081ff95167e52 Mon Sep 17 00:00:00 2001
From: "Ching-Hsin,Lee"
Date: Fri, 22 Aug 2025 14:28:48 +0800
Subject: [PATCH 15/16] Update the logic to yield preemption disabled task

---
 tasks.c | 46 ++++++++++++++++++++++------------------------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/tasks.c b/tasks.c
index 8adf9d1d37c..7c975a90ece 100644
--- a/tasks.c
+++ b/tasks.c
@@ -1023,23 +1023,12 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList )
             #endif
             {
                 #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-                {
                     if( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U )
-                    {
-                        xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
-                        xLowestPriorityCore = xCoreID;
-                    }
-                    else
-                    {
-                        xYieldPendings[ xCoreID ] = pdTRUE;
-                    }
-                }
-                #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                #endif
                 {
                     xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
                     xLowestPriorityCore = xCoreID;
                 }
-                #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
             }
         }
         else
@@ -1077,6 +1066,16 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList )
         if( xLowestPriorityCore >= 0 )
     #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */
         {
+            #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
+                {
+                    if( ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable != 0U ) &&
+                        ( pxCurrentTCBs[ xCoreID ]->uxPriority < xLowestPriorityToPreempt ) )
+                    {
+                        xYieldPendings[ xCoreID ] = pdTRUE;
+                    }
+                }
+            #endif
             prvYieldCore( xLowestPriorityCore );
         }

@@ -1340,29 +1339,28 @@ static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList )
                             ( xYieldPendings[ uxCore ] == pdFALSE ) )
                         {
                             #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
-                            {
                                 if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U )
-                                {
-                                    xLowestPriority = xTaskPriority;
-                                    xLowestPriorityCore = ( BaseType_t ) uxCore;
-                                }
-                                else
-                                {
-                                    xYieldPendings[ uxCore ] = pdTRUE;
-                                }
-                            }
-                            #else /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                            #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
                             {
                                 xLowestPriority = xTaskPriority;
                                 xLowestPriorityCore = ( BaseType_t ) uxCore;
                             }
-                            #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
                         }
                     }
                 }

                 if( xLowestPriorityCore >= 0 )
                 {
+                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                        for( x = 0; x < configNUMBER_OF_CORES; x++ )
+                        {
+                            if( ( pxCurrentTCBs[ x ]->uxPreemptionDisable != 0U ) &&
+                                ( pxCurrentTCBs[ x ]->uxPriority < xLowestPriority ) )
+                            {
+                                xYieldPendings[ x ] = pdTRUE;
+                            }
+                        }
+                    #endif
                     prvYieldCore( xLowestPriorityCore );
                 }
             }

From 9035057a1d1c5c6a201b013da5486adfe1901fd6 Mon Sep 17 00:00:00 2001
From: "Ching-Hsin,Lee"
Date: Fri, 22 Aug 2025 17:21:21 +0800
Subject: [PATCH 16/16] Remove unused macros

---
 include/FreeRTOS.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h
index 5ea6bd79f87..d9bfc27e974 100644
--- a/include/FreeRTOS.h
+++ b/include/FreeRTOS.h
@@ -2958,10 +2958,6 @@
     #error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP FreeRTOS
 #endif

-#ifndef configLIGHTWEIGHT_CRITICAL_SECTION
-    #define configLIGHTWEIGHT_CRITICAL_SECTION    0
-#endif
-
 #ifndef configINITIAL_TICK_COUNT
     #define configINITIAL_TICK_COUNT    0
 #endif
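The patch above moves the preemption-disable handling out of the per-core selection loop: a core whose current task has preemption disabled is simply skipped as a preemption candidate, and only after a victim core has been chosen are yields pended (via xYieldPendings) for preemption-disabled cores running lower-priority tasks, so those cores give way once preemption is re-enabled. The standalone sketch below models that selection order in plain C. It is illustrative only, not kernel code: configNUMBER_OF_CORES, uxPriority, uxPreemptionDisable, xYieldPendings and prvYieldCore mirror names used in the patches, while MockTCB, xTasksOnCore, vRequestPreemption and the test harness are invented for the demonstration.

#include <stdio.h>

#define configNUMBER_OF_CORES    4

typedef struct MockTCB
{
    unsigned int uxPriority;          /* Priority of the task currently running on this core. */
    unsigned int uxPreemptionDisable; /* Non-zero: the task must not be preempted right now. */
} MockTCB;

/* Sample per-core state: core 1 runs a low-priority task with preemption disabled. */
static MockTCB xTasksOnCore[ configNUMBER_OF_CORES ] =
{
    { 3U, 0U }, { 1U, 1U }, { 2U, 0U }, { 4U, 0U }
};

static int xYieldPendings[ configNUMBER_OF_CORES ];

static void prvYieldCore( int xCoreID )
{
    printf( "Immediate yield requested on core %d\n", xCoreID );
}

/* Make room for an unblocked task of uxTaskPriority: pick the preemptible
 * core running the lowest-priority task, then pend yields for any
 * preemption-disabled core that will eventually have to give way. */
static void vRequestPreemption( unsigned int uxTaskPriority )
{
    int xCoreID;
    int xLowestPriorityCore = -1;
    unsigned int uxLowestPriority = uxTaskPriority;

    for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
    {
        /* Only cores that may be preempted right now are candidates. */
        if( ( xTasksOnCore[ xCoreID ].uxPreemptionDisable == 0U ) &&
            ( xTasksOnCore[ xCoreID ].uxPriority < uxLowestPriority ) )
        {
            uxLowestPriority = xTasksOnCore[ xCoreID ].uxPriority;
            xLowestPriorityCore = xCoreID;
        }
    }

    if( xLowestPriorityCore >= 0 )
    {
        /* Pend a yield for every preemption-disabled core running a task of
         * lower priority than the chosen victim; it yields as soon as its
         * task re-enables preemption. */
        for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
        {
            if( ( xTasksOnCore[ xCoreID ].uxPreemptionDisable != 0U ) &&
                ( xTasksOnCore[ xCoreID ].uxPriority < uxLowestPriority ) )
            {
                xYieldPendings[ xCoreID ] = 1;
            }
        }

        prvYieldCore( xLowestPriorityCore );
    }
}

int main( void )
{
    int xCoreID;

    vRequestPreemption( 5U ); /* An unblocked task of priority 5 wants a core. */

    for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
    {
        printf( "Core %d yield pending: %d\n", xCoreID, xYieldPendings[ xCoreID ] );
    }

    return 0;
}

Run against the sample data, the sketch selects core 2 (the lowest-priority core that may be preempted) for an immediate yield, and leaves core 1, whose task has preemption disabled, with only a pending yield flag.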