diff --git a/event_groups.c b/event_groups.c index 66ddfb18f3..bf9c17ec98 100644 --- a/event_groups.c +++ b/event_groups.c @@ -87,24 +87,6 @@ #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ -/* - * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows - * ISRs to pend access to the event group. Caller cannot be preempted by other tasks - * after locking the event group, thus allowing the caller to execute non-deterministic - * operations. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - -/* - * Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables - * preemption for the caller. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* * Test the bits set in uxCurrentEventBits to see if the wait condition is met. * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is @@ -129,11 +111,19 @@ * When the task unlocks the event group, all pended access attempts are handled. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits ) - #define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits ); + #define event_groupsLOCK( pxEventBits ) taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) ) + #define event_groupsUNLOCK( pxEventBits ) taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) ) + #define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \ + do { \ + taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( &( ( pxEventBits )->xTaskSpinlock ), pxxAlreadyYielded ); \ + } while( 0 ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define event_groupsLOCK( pxEventBits ) vTaskSuspendAll() - #define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll() + #define event_groupsUNLOCK( pxEventBits ) do{ ( void ) xTaskResumeAll(); } while( 0 ) + #define event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, pxxAlreadyYielded ) \ + do { \ + *( pxxAlreadyYielded ) = xTaskResumeAll(); \ + } while( 0 ) #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -316,7 +306,7 @@ } } } - xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -472,7 +462,7 @@ traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } - xAlreadyYielded = event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK_WITH_YIELD_STATUS( pxEventBits, &xAlreadyYielded ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -640,7 +630,6 @@ traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are about to access the kernel data group non-deterministically, * thus we suspend the kernel data group.*/ vTaskSuspendAll(); @@ 
-721,7 +710,7 @@ ( void ) xTaskResumeAll(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK( pxEventBits ); traceRETURN_xEventGroupSetBits( uxReturnBits ); @@ -745,7 +734,6 @@ traceEVENT_GROUP_DELETE( xEventGroup ); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - /* We are about to access the kernel data group non-deterministically, * thus we suspend the kernel data group.*/ vTaskSuspendAll(); @@ -763,7 +751,7 @@ ( void ) xTaskResumeAll(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } - ( void ) event_groupsUNLOCK( pxEventBits ); + event_groupsUNLOCK( pxEventBits ); #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) { @@ -867,48 +855,6 @@ traceRETURN_vEventGroupClearBitsCallback(); } /*-----------------------------------------------------------*/ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) - { - /* Disable preemption so that the current task cannot be preempted by another task */ - vTaskPreemptionDisable( NULL ); - - /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing - * the event group while it is suspended. */ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) - { - BaseType_t xReturn = pdFALSE; - - /* Release the previously held task spinlock */ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) ); - - /* Re-enable preemption */ - vTaskPreemptionEnable( NULL ); - - /* Yield if preemption was re-enabled*/ - if( xTaskUnlockCanYield() == pdTRUE ) - { - taskYIELD_WITHIN_API(); - - /* Return true as the task was preempted */ - xReturn = pdTRUE; - } - else - { - /* Return false as the task was not preempted */ - xReturn = pdFALSE; - } - - return xReturn; - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, const EventBits_t uxBitsToWaitFor, diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 83caf37dea..d9bfc27e97 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -2958,10 +2958,6 @@ #error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP FreeRTOS #endif -#ifndef configLIGHTWEIGHT_CRITICAL_SECTION - #define configLIGHTWEIGHT_CRITICAL_SECTION 0 -#endif - #ifndef configINITIAL_TICK_COUNT #define configINITIAL_TICK_COUNT 0 #endif @@ -2972,8 +2968,8 @@ * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when * the tick count is returned to the standard critical section macros. 
*/ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) - #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock ) + #define portTICK_TYPE_ENTER_CRITICAL() kernelENTER_CRITICAL() + #define portTICK_TYPE_EXIT_CRITICAL() kernelEXIT_CRITICAL() #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define portTICK_TYPE_ENTER_CRITICAL() portENTER_CRITICAL() #define portTICK_TYPE_EXIT_CRITICAL() portEXIT_CRITICAL() diff --git a/include/task.h b/include/task.h index 9dbc8db89e..94141ad0af 100644 --- a/include/task.h +++ b/include/task.h @@ -299,11 +299,11 @@ typedef enum { \ const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ /* Task spinlock is always taken first */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ /* Disable interrupts */ \ portDISABLE_INTERRUPTS(); \ /* Take the ISR spinlock next */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ /* Increment the critical nesting count */ \ portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ } \ @@ -322,11 +322,13 @@ typedef enum #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \ do { \ *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - /* Take the ISR spinlock */ \ - portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ - /* Increment the critical nesting count */ \ - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + { \ + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + /* Take the ISR spinlock */ \ + portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ + /* Increment the critical nesting count */ \ + portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + } \ } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ @@ -339,27 +341,27 @@ typedef enum * \ingroup GranularLocks */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \ - do { \ - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ - /* Release the ISR spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ - /* Release the task spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \ - /* Decrement the critical nesting count */ \ - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ - /* Enable interrupts only if the critical nesting count is 0 */ \ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ - { \ - portENABLE_INTERRUPTS(); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ - /* Re-enable preemption */ \ - vTaskPreemptionEnable( NULL ); \ + #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \ + do { \ + const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \ + configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \ + /* Release the ISR spinlock */ \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ + /* Release the task spinlock */ \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( 
pxTaskSpinlock ) ); \ + /* Decrement the critical nesting count */ \ + portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ + /* Enable interrupts only if the critical nesting count is 0 */ \ + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ + { \ + portENABLE_INTERRUPTS(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ + /* Re-enable preemption */ \ + vTaskPreemptionEnable( NULL ); \ } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ @@ -379,7 +381,7 @@ typedef enum /* Decrement the critical nesting count */ \ portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \ /* Release the ISR spinlock */ \ - portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \ + portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \ if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \ { \ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ @@ -387,6 +389,74 @@ typedef enum } while( 0 ) #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ +/** + * task. h + * + * Macros to lock a data group (task-level lock only). + * + * \defgroup taskDATA_GROUP_LOCK taskDATA_GROUP_LOCK + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \ + do { \ + /* Disable preemption while holding the task spinlock. */ \ + vTaskPreemptionDisable( NULL ); \ + portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/** + * task. h + * + * Macros to unlock a data group (task-level lock only). + * + * \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \ + do { \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + /* Re-enable preemption after releasing the task spinlock. */ \ + vTaskPreemptionEnable( NULL ); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + +/** + * task. h + * + * Macros to unlock a data group and return the task yield status (task-level lock only). + * + * \defgroup taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS + * \ingroup GranularLocks + */ +#if ( portUSING_GRANULAR_LOCKS == 1 ) + #define taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( pxTaskSpinlock, pxTaskAlreadyYielded ) \ + do { \ + portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \ + /* Re-enable preemption after releasing the task spinlock. */ \ + *( pxTaskAlreadyYielded ) = xCurrentTaskPreemptionEnable(); \ + } while( 0 ) +#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ + /*----------------------------------------------------------- * TASK CREATION API *----------------------------------------------------------*/ @@ -3681,6 +3751,7 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, * making the call, otherwise pdFALSE.
*/ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; +BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) PRIVILEGED_FUNCTION; @@ -3876,13 +3947,13 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNC #endif /* - * Checks whether a yield is required after portUNLOCK_DATA_GROUP() returns. - * To be called while data group is locked. + * Enable preemption of current task asl return the task already yield status. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - BaseType_t xTaskUnlockCanYield( void ); + BaseType_t xCurrentTaskPreemptionEnable( void ); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + #if ( portUSING_MPU_WRAPPERS == 1 ) /* diff --git a/queue.c b/queue.c index 21426d448b..0cdf61b116 100644 --- a/queue.c +++ b/queue.c @@ -221,7 +221,10 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, * Checks to see if a queue is a member of a queue set, and if so, notifies * the queue set that the queue contains data. */ - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue, + BaseType_t xNotifyFromISR ) PRIVILEGED_FUNCTION; + #define prvNotifyQueueSetContainer( pxQueue ) prvNotifyQueueSetContainerGeneric( pxQueue, pdFALSE ) + #define prvNotifyQueueSetContainerFromISR( pxQueue ) prvNotifyQueueSetContainerGeneric( pxQueue, pdTRUE ) #endif /* @@ -328,25 +331,23 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * When the tasks unlocks the queue, all pended access attempts are handled. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define queueLOCK( pxQueue ) \ - do { \ - vTaskPreemptionDisable( NULL ); \ - prvLockQueue( ( pxQueue ) ); \ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \ + #define queueLOCK( pxQueue ) \ + do { \ + taskDATA_GROUP_LOCK( &( ( pxQueue )->xTaskSpinlock ) ); \ + prvLockQueue( ( pxQueue ) ); \ } while( 0 ) - #define queueUNLOCK( pxQueue, xYieldAPI ) \ - do { \ - prvUnlockQueue( ( pxQueue ) ); \ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) ); \ - vTaskPreemptionEnable( NULL ); \ - if( ( xYieldAPI ) == pdTRUE ) \ - { \ - taskYIELD_WITHIN_API(); \ - } \ - else \ - { \ - mtCOVERAGE_TEST_MARKER(); \ - } \ + #define queueUNLOCK( pxQueue, xYieldAPI ) \ + do { \ + prvUnlockQueue( ( pxQueue ) ); \ + taskDATA_GROUP_UNLOCK( &( ( pxQueue )->xTaskSpinlock ) ); \ + if( ( xYieldAPI ) == pdTRUE ) \ + { \ + taskYIELD_WITHIN_API(); \ + } \ + else \ + { \ + mtCOVERAGE_TEST_MARKER(); \ + } \ } while( 0 ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define queueLOCK( pxQueue ) \ @@ -1296,7 +1297,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * in the queue has not changed. 
*/ mtCOVERAGE_TEST_MARKER(); } - else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + else if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE ) { /* The queue is a member of a queue set, and posting * to the queue set caused a higher priority task to @@ -1319,7 +1320,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. */ @@ -1470,7 +1471,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( pxQueue->pxQueueSetContainer != NULL ) { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + if( prvNotifyQueueSetContainerFromISR( pxQueue ) != pdFALSE ) { /* The semaphore is a member of a queue set, and * posting to the queue set caused a higher priority @@ -1493,7 +1494,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority so * record that a context switch is required. */ @@ -2113,7 +2114,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, { if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + if( xTaskRemoveFromEventListFromISR( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { /* The task waiting has a higher priority than us so * force a context switch. */ @@ -3354,8 +3355,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /*-----------------------------------------------------------*/ #if ( configUSE_QUEUE_SETS == 1 ) - - static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) + static BaseType_t prvNotifyQueueSetContainerGeneric( const Queue_t * const pxQueue, + BaseType_t xNotifyFromISR ) { Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer; BaseType_t xReturn = pdFALSE; @@ -3381,14 +3382,29 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xNotifyFromISR != pdTRUE ) { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); + if( xTaskRemoveFromEventListFromISR( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } else diff --git a/stream_buffer.c b/stream_buffer.c index 8dd11b8eb8..fe52427c6b 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -63,10 +63,10 @@ * Macros to mark the start and end of a critical code region. 
*/ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) - #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxStreamBuffer->xISRSpinlock, puxSavedInterruptStatus ) - #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &pxStreamBuffer->xTaskSpinlock, &pxStreamBuffer->xISRSpinlock ) - #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxStreamBuffer->xISRSpinlock ) + #define sbENTER_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_ENTER_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) ) + #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &( ( pxStreamBuffer )->xISRSpinlock ), puxSavedInterruptStatus ) + #define sbEXIT_CRITICAL( pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL( &( ( pxStreamBuffer )->xTaskSpinlock ), &( ( pxStreamBuffer )->xISRSpinlock ) ) + #define sbEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxStreamBuffer ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &( ( pxStreamBuffer )->xISRSpinlock ) ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define sbENTER_CRITICAL( pxStreamBuffer ) taskENTER_CRITICAL(); #define sbENTER_CRITICAL_FROM_ISR( pxStreamBuffer, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 ) @@ -84,8 +84,8 @@ * When the task unlocks the stream buffer, all pended access attempts are handled. */ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - #define sbLOCK( pxStreamBuffer ) prvLockStreamBufferForTasks( pxStreamBuffer ) - #define sbUNLOCK( pxStreamBuffer ) prvUnlockStreamBufferForTasks( pxStreamBuffer ) + #define sbLOCK( pxStreamBuffer ) taskDATA_GROUP_LOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) ) + #define sbUNLOCK( pxStreamBuffer ) taskDATA_GROUP_UNLOCK( &( ( pxStreamBuffer )->xTaskSpinlock ) ) #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ #define sbLOCK( pxStreamBuffer ) vTaskSuspendAll() #define sbUNLOCK( pxStreamBuffer ) ( void ) xTaskResumeAll() @@ -109,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) sbUNLOCK( pxStreamBuffer ); \ + sbUNLOCK( pxStreamBuffer ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED */ @@ -189,7 +189,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) sbUNLOCK( pxStreamBuffer ) + sbUNLOCK( pxStreamBuffer ) #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -288,24 +288,6 @@ typedef struct StreamBufferDef_t #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ } StreamBuffer_t; -/* - * Locks a stream buffer for tasks. Prevents other tasks from accessing the stream buffer - * but allows ISRs to pend access to the stream buffer. Caller cannot be preempted - * by other tasks after locking the stream buffer, thus allowing the caller to - * execute non-deterministic operations. 
- */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - -/* - * Unlocks a stream buffer for tasks. Handles all pended access from ISRs, then reenables preemption - * for the caller. - */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) PRIVILEGED_FUNCTION; - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* * The number of bytes available to be read from the buffer. */ @@ -381,31 +363,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvLockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) - { - /* Disable preemption so that the current task cannot be preempted by another task */ - vTaskPreemptionDisable( NULL ); - - /* Keep holding xTaskSpinlock after unlocking the data group to prevent tasks - * on other cores from accessing the stream buffer while it is suspended. */ - portGET_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - static void prvUnlockStreamBufferForTasks( StreamBuffer_t * const pxStreamBuffer ) - { - /* Release the previously held task spinlock */ - portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxStreamBuffer->xTaskSpinlock ) ); - - /* Re-enable preemption */ - vTaskPreemptionEnable( NULL ); - } - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) diff --git a/tasks.c b/tasks.c index 91f5c5098f..d988186b3f 100644 --- a/tasks.c +++ b/tasks.c @@ -629,14 +629,6 @@ static BaseType_t prvCreateIdleTasks( void ); static void prvCheckForRunStateChange( void ); #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) -/* - * Checks to see if another task moved the current task out of the ready - * list while it was waiting to enter a lightweight critical section and yields, if so. 
- */ - static void prvLightWeightCheckForRunStateChange( void ); -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ - #if ( configNUMBER_OF_CORES > 1 ) /* @@ -897,6 +889,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; size_t n ); #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + static BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask ); + +#endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + +static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ); + /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -968,68 +969,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - static void prvLightWeightCheckForRunStateChange( void ) - { - - const TCB_t * pxThisTCB; - BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - /* This must only be called from within a task. */ - portASSERT_IF_IN_ISR(); - - /* This function is always called with interrupts disabled - * so this is safe. */ - pxThisTCB = pxCurrentTCBs[ xCoreID ]; - - while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD ) - { - UBaseType_t uxPrevCriticalNesting; - - /* We are only here if we just entered a critical section - * or if we just suspended the scheduler, and another task - * has requested that we yield. - * - * This is slightly complicated since we need to save and restore - * the suspension and critical nesting counts, as well as release - * and reacquire the correct locks. And then, do it all over again - * if our state changed again during the reacquisition. */ - uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID ); - - if( uxPrevCriticalNesting > 0U ) - { - portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U ); - kernelRELEASE_ISR_LOCK( xCoreID ); - } - else - { - /* The scheduler is suspended. uxSchedulerSuspended is updated - * only when the task is not requested to yield. */ - mtCOVERAGE_TEST_MARKER(); - } - - portMEMORY_BARRIER(); - - portENABLE_INTERRUPTS(); - - /* Enabling interrupts should cause this core to immediately service - * the pending interrupt and yield. After servicing the pending interrupt, - * the task needs to re-evaluate its run state within this loop, as - * other cores may have requested this task to yield, potentially altering - * its run state. */ - - portDISABLE_INTERRUPTS(); - - xCoreID = ( BaseType_t ) portGET_CORE_ID(); - kernelGET_ISR_LOCK( xCoreID ); - - portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting ); - }; -} -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ - -/*-----------------------------------------------------------*/ - #if ( configNUMBER_OF_CORES > 1 ) static void prvYieldForTask( const TCB_t * pxTCB ) { @@ -1044,7 +983,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) - /* No task should yield for this one if it is a lower priority * than priority level of currently ready tasks. 
*/ if( pxTCB->uxPriority >= uxTopReadyPriority ) @@ -1125,6 +1063,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( xLowestPriorityCore >= 0 ) #endif /* #if ( configRUN_MULTIPLE_PRIORITIES == 0 ) */ { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ ) + { + if( ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable != 0U ) && + ( pxCurrentTCBs[ xCoreID ]->uxPriority < xLowestPriorityToPreempt ) ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + } + #endif prvYieldCore( xLowestPriorityCore ); } @@ -1389,7 +1337,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) if( pxCurrentTCBs[ uxCore ]->uxPreemptionDisable == 0U ) - #endif + #endif /* if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { xLowestPriority = xTaskPriority; xLowestPriorityCore = ( BaseType_t ) uxCore; @@ -1400,6 +1348,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; if( xLowestPriorityCore >= 0 ) { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + for( x = 0; x < configNUMBER_OF_CORES; x++ ) + { + if( ( pxCurrentTCBs[ x ]->uxPreemptionDisable != 0U ) && + ( pxCurrentTCBs[ x ]->uxPriority < xLowestPriority ) ) + { + xYieldPendings[ x ] = pdTRUE; + } + } + #endif prvYieldCore( xLowestPriorityCore ); } } @@ -2371,7 +2329,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, configASSERT( pxTCB != NULL ); #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task has disabled preemption, we need to defer the deletion until the * task enables preemption. The deletion will be performed in vTaskPreemptionEnable(). */ if( pxTCB->uxPreemptionDisable > 0U ) @@ -2384,6 +2342,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* Reset the deferred state change flags */ pxTCB->uxDeferredStateChange &= ~tskDEFERRED_DELETION; } + } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -2815,7 +2774,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskPriorityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. */ @@ -2824,7 +2791,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxPriority; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_uxTaskPriorityGet( uxReturn ); @@ -2893,7 +2868,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_uxTaskBasePriorityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { /* If null is passed in here then it is the base priority of the task * that called uxTaskBasePriorityGet() that is being queried. 
*/ @@ -2902,7 +2885,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, uxReturn = pxTCB->uxBasePriority; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_uxTaskBasePriorityGet( uxReturn ); @@ -3050,12 +3041,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, /* Setting the priority of a running task down means * there may now be another task of higher priority that * is ready to execute. */ - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxTCB->uxPreemptionDisable == 0U ) - #endif - { - xYieldRequired = pdTRUE; - } + xYieldRequired = pdTRUE; } else { @@ -3242,14 +3228,30 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, traceENTER_vTaskCoreAffinityGet( xTask ); - kernelENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { pxTCB = prvGetTCBFromHandle( xTask ); configASSERT( pxTCB != NULL ); uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; } - kernelEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + kernelEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask ); @@ -3380,11 +3382,7 @@ void vKernelLightExitCritical( void ) traceENTER_vTaskPreemptionDisable( xTask ); - #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - vKernelLightWeightEnterCritical(); - #else - kernelENTER_CRITICAL(); - #endif + kernelENTER_CRITICAL(); { if( xSchedulerRunning != pdFALSE ) { @@ -3398,11 +3396,7 @@ void vKernelLightExitCritical( void ) mtCOVERAGE_TEST_MARKER(); } } - #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - vKernelLightWeightExitCritical(); - #else - kernelEXIT_CRITICAL(); - #endif + kernelEXIT_CRITICAL(); traceRETURN_vTaskPreemptionDisable(); } @@ -3412,19 +3406,16 @@ void vKernelLightExitCritical( void ) #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - void vTaskPreemptionEnable( const TaskHandle_t xTask ) + static BaseType_t prvTaskPreemptionEnable( const TaskHandle_t xTask ) { TCB_t * pxTCB; - UBaseType_t uxDeferredAction = 0U; - - traceENTER_vTaskPreemptionEnable( xTask ); + BaseType_t xTaskAlreadyYielded = pdFALSE; + BaseType_t xCoreID; - #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - vKernelLightWeightEnterCritical(); - #else - kernelENTER_CRITICAL(); - #endif + kernelENTER_CRITICAL(); { + xCoreID = portGET_CORE_ID(); + if( xSchedulerRunning != pdFALSE ) { pxTCB = prvGetTCBFromHandle( xTask ); @@ -3439,14 +3430,39 @@ void vKernelLightExitCritical( void ) * preemption was disabled. */ if( pxTCB->uxDeferredStateChange != 0U ) { - /* Capture the deferred action to perform outside critical section */ - uxDeferredAction = pxTCB->uxDeferredStateChange; + if( pxTCB->uxDeferredStateChange & tskDEFERRED_DELETION ) + { + vTaskDelete( xTask ); + } + else if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) + { + vTaskSuspend( xTask ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxTCB->uxDeferredStateChange = 0U; + xTaskAlreadyYielded = pdTRUE; } else { - if( xYieldPendings[ pxTCB->xTaskRunState ] != pdFALSE && ( taskTASK_IS_RUNNING( pxTCB ) != pdFALSE ) ) + if( taskTASK_IS_RUNNING( pxTCB ) == pdTRUE ) { - prvYieldCore( pxTCB->xTaskRunState ); + if( pxTCB->xTaskRunState != xCoreID ) + { + /* When enable preemption of other tasks, the task is + * should handle the pending yield request for other tasks. 
*/ + prvYieldCore( pxTCB->xTaskRunState ); + xTaskAlreadyYielded = pdTRUE; + } + else + { + /* The pending yield request will be handled after leaving + * the critical section. */ + xTaskAlreadyYielded = xYieldPendings[ xCoreID ]; + } } else { @@ -3464,28 +3480,21 @@ void vKernelLightExitCritical( void ) mtCOVERAGE_TEST_MARKER(); } } - #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - vKernelLightWeightExitCritical(); - #else - kernelEXIT_CRITICAL(); - #endif + kernelEXIT_CRITICAL(); - /* Handle deferred actions outside critical section */ - if( uxDeferredAction != 0U ) - { - if( uxDeferredAction & tskDEFERRED_DELETION ) - { - vTaskDelete( xTask ); - } - else if( uxDeferredAction & tskDEFERRED_SUSPENSION ) - { - vTaskSuspend( xTask ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } + return xTaskAlreadyYielded; + } + + BaseType_t xCurrentTaskPreemptionEnable( void ) + { + return prvTaskPreemptionEnable( NULL ); + } + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + traceENTER_vTaskPreemptionEnable( xTask ); + + ( void ) prvTaskPreemptionEnable( xTask ); traceRETURN_vTaskPreemptionEnable(); } @@ -3513,7 +3522,7 @@ void vKernelLightExitCritical( void ) configASSERT( pxTCB != NULL ); #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task has disabled preemption, we need to defer the suspension until the * task enables preemption. The suspension will be performed in vTaskPreemptionEnable(). */ if( pxTCB->uxPreemptionDisable > 0U ) @@ -3526,6 +3535,7 @@ void vKernelLightExitCritical( void ) /* Reset the deferred state change flags */ pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; } + } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -3772,12 +3782,10 @@ void vKernelLightExitCritical( void ) configASSERT( xTaskToResume ); #if ( configNUMBER_OF_CORES == 1 ) - /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. */ if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) #else - /* The parameter cannot be NULL as it is impossible to resume the * currently executing task. It is also impossible to resume a task * that is actively running on another core but it is not safe @@ -3789,7 +3797,7 @@ void vKernelLightExitCritical( void ) kernelENTER_CRITICAL(); { #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - + { /* If the task being resumed is in a deferred suspension state, * we simply clear the deferred suspension state and return. */ if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) @@ -3801,6 +3809,7 @@ void vKernelLightExitCritical( void ) { mtCOVERAGE_TEST_MARKER(); } + } #endif /* configUSE_TASK_PREEMPTION_DISABLE */ #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) @@ -3849,6 +3858,10 @@ void vKernelLightExitCritical( void ) TCB_t * const pxTCB = xTaskToResume; UBaseType_t uxSavedInterruptStatus; + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xTaskResumed = pdFALSE; + #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + traceENTER_xTaskResumeFromISR( xTaskToResume ); configASSERT( xTaskToResume ); @@ -3876,58 +3889,79 @@ void vKernelLightExitCritical( void ) /* coverity[misra_c_2012_directive_4_7_violation] */ uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); { - if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) { - traceTASK_RESUME_FROM_ISR( pxTCB ); + /* If the task being resumed is in a deferred suspension state, + * we simply clear the deferred suspension state and return. 
*/ + if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION ) + { + pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION; + xTaskResumed = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ - /* Check the ready lists can be accessed. */ - if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( xTaskResumed == pdFALSE ) + #endif /* configUSE_TASK_PREEMPTION_DISABLE */ + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { - #if ( configNUMBER_OF_CORES == 1 ) + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { - /* Ready lists can be accessed so move the task from the - * suspended list to the ready list directly. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + #if ( configNUMBER_OF_CORES == 1 ) { - xYieldRequired = pdTRUE; + /* Ready lists can be accessed so move the task from the + * suspended list to the ready list directly. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + { + xYieldRequired = pdTRUE; - /* Mark that a yield is pending in case the user is not - * using the return value to initiate a context switch - * from the ISR using the port specific portYIELD_FROM_ISR(). */ - xYieldPendings[ 0 ] = pdTRUE; + /* Mark that a yield is pending in case the user is not + * using the return value to initiate a context switch + * from the ISR using the port specific portYIELD_FROM_ISR(). */ + xYieldPendings[ 0 ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else + #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + * is held in the pending ready list until the scheduler is + * unsuspended. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) + { + prvYieldForTask( pxTCB ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) { - mtCOVERAGE_TEST_MARKER(); + xYieldRequired = pdTRUE; } } - #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ - - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxTCB ); + #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ } else { - /* The delayed or ready lists cannot be accessed so the task - * is held in the pending ready list until the scheduler is - * unsuspended. 
*/ - vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + mtCOVERAGE_TEST_MARKER(); } - - #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) - { - prvYieldForTask( pxTCB ); - - if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) - { - xYieldRequired = pdTRUE; - } - } - #endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PREEMPTION == 1 ) ) */ - } - else - { - mtCOVERAGE_TEST_MARKER(); } } kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); @@ -4536,11 +4570,7 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPendings[ xCoreID ] != pdFALSE - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) - #endif - ) + if( xYieldPendings[ xCoreID ] != pdFALSE ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -5145,10 +5175,14 @@ BaseType_t xTaskIncrementTick( void ) TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) + UBaseType_t uxSavedInterruptStatus; + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + traceENTER_xTaskIncrementTick(); #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /* Called by the portable layer each time a tick interrupt occurs. @@ -5284,32 +5318,14 @@ BaseType_t xTaskIncrementTick( void ) { #if ( configNUMBER_OF_CORES == 1 ) { - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - if( pxCurrentTCB->uxPreemptionDisable != 0U ) - { - mtCOVERAGE_TEST_MARKER(); - } - else - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #else /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) - { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - #endif /* #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > 1U ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } #else /* #if ( configNUMBER_OF_CORES == 1 ) */ { @@ -5755,7 +5771,6 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* THIS FUNCTION MUST BE CALLED WITH THE * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); @@ -5798,7 +5813,6 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event groups implementation. 
*/ configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); @@ -5841,7 +5855,6 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* Suspend the kernel data group as we are about to access its members */ vTaskSuspendAll(); #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ - /* This function should not be called by application code hence the * 'Restricted' in its name. It is not part of the public API. It is * designed for use by kernel code, and has special calling requirements - @@ -5877,38 +5890,11 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, #endif /* configUSE_TIMERS */ /*-----------------------------------------------------------*/ -BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) +static BaseType_t prvTaskRemoveFromEventList( const List_t * const pxEventList ) { TCB_t * pxUnblockedTCB; BaseType_t xReturn; - traceENTER_xTaskRemoveFromEventList( pxEventList ); - - #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) - - /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be - * called from a critical section within an ISR. */ - #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ - /* Lock the kernel data group as we are about to access its members */ - UBaseType_t uxSavedInterruptStatus; - - if( portCHECK_IF_IN_ISR() == pdTRUE ) - { - uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); - } - else - { - uxSavedInterruptStatus = 0; - kernelENTER_CRITICAL(); - } - - /* Before taking the kernel lock, another task/ISR could have already - * emptied the pxEventList. So we insert a check here to see if - * pxEventList is empty before attempting to remove an item from it. */ - if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) - { - #endif /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ - /* The event list is sorted in priority order, so the first in the list can * be removed as it is known to be the highest priority. Remove the TCB from * the delayed list, and add it to the ready list. @@ -5987,27 +5973,74 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) } #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ - #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) -} -else -{ - /* The pxEventList was emptied before we entered the critical - * section, Nothing to do except return pdFALSE. */ - xReturn = pdFALSE; + return xReturn; } +/*-----------------------------------------------------------*/ -/* We are done accessing the kernel data group. Unlock it. */ -if( portCHECK_IF_IN_ISR() == pdTRUE ) +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { - kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + BaseType_t xReturn; + + traceENTER_xTaskRemoveFromEventList( pxEventList ); + + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + { + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + xReturn = prvTaskRemoveFromEventList( pxEventList ); + } + #else /* #if ( ! 
( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + { + kernelENTER_CRITICAL(); + { + /* Lock the kernel data group as we are about to access its members */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + xReturn = prvTaskRemoveFromEventList( pxEventList ); + } + else + { + xReturn = pdFALSE; + } + } + kernelEXIT_CRITICAL(); + } + #endif /* if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + + traceRETURN_xTaskRemoveFromEventList( xReturn ); + + return xReturn; } -else +/*-----------------------------------------------------------*/ + +BaseType_t xTaskRemoveFromEventListFromISR( const List_t * const pxEventList ) { - kernelEXIT_CRITICAL(); -} - #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ + BaseType_t xReturn; + + #if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) + { + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + xReturn = prvTaskRemoveFromEventList( pxEventList ); + } + #else /* #if ( ! ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ + { + UBaseType_t uxSavedInterruptStatus = kernelENTER_CRITICAL_FROM_ISR(); + { + /* Lock the kernel data group as we are about to access its members */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + xReturn = prvTaskRemoveFromEventList( pxEventList ); + } + else + { + xReturn = pdFALSE; + } + } + kernelEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + } + #endif /* if ( !( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) ) */ - traceRETURN_xTaskRemoveFromEventList( xReturn ); return xReturn; } /*-----------------------------------------------------------*/ @@ -7191,9 +7224,7 @@ static void prvResetNextTaskUnblockTime( void ) traceENTER_xTaskPriorityInherit( pxMutexHolder ); - #if ( portUSING_GRANULAR_LOCKS == 1 ) - kernelENTER_CRITICAL(); - #endif + kernelENTER_CRITICAL(); { /* If the mutex is taken by an interrupt, the mutex holder is NULL. Priority * inheritance is not applied in this scenario. */ @@ -7281,9 +7312,7 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - #if ( portUSING_GRANULAR_LOCKS == 1 ) - kernelEXIT_CRITICAL(); - #endif + kernelEXIT_CRITICAL(); traceRETURN_xTaskPriorityInherit( xReturn ); @@ -7592,7 +7621,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskEnterCritical( void ) { @@ -7604,7 +7633,13 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + /* When using granular locks, the critical section nesting count + * might have already been incremented if this call is a nested + * call from a data group critical section. Hence, we have to + * acquire the kernel task and ISR locks unconditionally. 
*/ + #if ( portUSING_GRANULAR_LOCKS != 1 ) + if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + #endif /* portUSING_GRANULAR_LOCKS */ { kernelGET_TASK_LOCK( xCoreID ); kernelGET_ISR_LOCK( xCoreID ); @@ -7641,56 +7676,7 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskEnterCritical(); } -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) - - void vTaskEnterCritical( void ) - { - traceENTER_vTaskEnterCritical(); - - portDISABLE_INTERRUPTS(); - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - if( xSchedulerRunning != pdFALSE ) - { - kernelGET_TASK_LOCK( xCoreID ); - kernelGET_ISR_LOCK( xCoreID ); - - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - /* This is not the interrupt safe version of the enter critical - * function so assert() if it is being called from an interrupt - * context. Only API functions that end in "FromISR" can be used in an - * interrupt. Only assert if the critical nesting count is 1 to - * protect against recursive calls if the assert function also uses a - * critical section. */ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U ) - { - portASSERT_IF_IN_ISR(); - - if( uxSchedulerSuspended == 0U ) - { - /* The only time there would be a problem is if this is called - * before a context switch and vTaskExitCritical() is called - * after pxCurrentTCB changes. Therefore this should not be - * used within vTaskSwitchContext(). */ - prvCheckForRunStateChange(); - } - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - - traceRETURN_vTaskEnterCritical(); - } - -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -7771,7 +7757,7 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( ( portCRITICAL_NESTING_IN_TCB == 1 ) && ( configNUMBER_OF_CORES == 1 ) ) */ /*-----------------------------------------------------------*/ -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) +#if ( configNUMBER_OF_CORES > 1 ) void vTaskExitCritical( void ) { @@ -7791,82 +7777,43 @@ static void prvResetNextTaskUnblockTime( void ) if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) { - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); + BaseType_t xYieldCurrentTask = pdFALSE; - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) + /* Get the xYieldPending stats inside the critical section. */ + if( uxSchedulerSuspended == 0U ) { - BaseType_t xYieldCurrentTask; - - /* Get the xYieldPending stats inside the critical section. */ - xYieldCurrentTask = xYieldPendings[ xCoreID ]; - - kernelRELEASE_ISR_LOCK( xCoreID ); - kernelRELEASE_TASK_LOCK( xCoreID ); - portENABLE_INTERRUPTS(); - - /* When a task yields in a critical section it just sets - * xYieldPending to true. So now that we have exited the - * critical section check if xYieldPending is true, and - * if so yield. 
*/ - if( xYieldCurrentTask != pdFALSE ) + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) && + ( pxCurrentTCBs[ xCoreID ]->uxDeferredStateChange == 0U ) ) + #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ { - portYIELD(); + xYieldCurrentTask = xYieldPendings[ xCoreID ]; } } else { mtCOVERAGE_TEST_MARKER(); } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - traceRETURN_vTaskExitCritical(); - } - -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS != 1 ) ) */ -/*-----------------------------------------------------------*/ - -#if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) - - void vTaskExitCritical( void ) - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - traceENTER_vTaskExitCritical(); - - if( xSchedulerRunning != pdFALSE ) - { - /* If critical nesting count is zero then this function - * does not match a previous call to vTaskEnterCritical(). */ - configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); - - /* This function should not be called in ISR. Use vTaskExitCriticalFromISR - * to exit critical section from ISR. */ - portASSERT_IF_IN_ISR(); - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) - { - /* Release the ISR and task locks */ - kernelRELEASE_ISR_LOCK( xCoreID ); - kernelRELEASE_TASK_LOCK( xCoreID ); + /* Release the ISR and task locks first when using granular locks. */ + #if ( portUSING_GRANULAR_LOCKS == 1 ) + { + /* Critical nesting count is used to count interrupt status. + * The spinlock is implemented recursively. */ + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); + } + #endif portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - /* If the critical nesting count is 0, enable interrupts */ if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) { - BaseType_t xYieldCurrentTask; - - /* Get the xYieldPending stats inside the critical section. 
*/ - xYieldCurrentTask = xTaskUnlockCanYield(); - + #if ( portUSING_GRANULAR_LOCKS == 0 ) + { + kernelRELEASE_ISR_LOCK( xCoreID ); + kernelRELEASE_TASK_LOCK( xCoreID ); + } + #endif portENABLE_INTERRUPTS(); /* When a task yields in a critical section it just sets @@ -7896,7 +7843,7 @@ static void prvResetNextTaskUnblockTime( void ) traceRETURN_vTaskExitCritical(); } -#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( portUSING_GRANULAR_LOCKS == 1 ) ) */ +#endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES > 1 ) @@ -7945,104 +7892,6 @@ static void prvResetNextTaskUnblockTime( void ) #endif /* #if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - - void vKernelLightWeightEnterCritical( void ) - { - if( xSchedulerRunning != pdFALSE ) - { - portDISABLE_INTERRUPTS(); - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - /* Get only the ISR lock, not the task lock */ - kernelGET_ISR_LOCK( xCoreID ); - - portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /* Check for the run state change of the task only if a deferred state change is not pending */ - && pxCurrentTCB->uxDeferredStateChange == 0U - #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - ) - { - prvLightWeightCheckForRunStateChange(); - } - } - } - } - -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ -/*-----------------------------------------------------------*/ - -#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) - - void vKernelLightWeightExitCritical( void ) - { - if( xSchedulerRunning != pdFALSE ) - { - const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); - - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ) - { - /* Release the ISR lock */ - kernelRELEASE_ISR_LOCK( xCoreID ); - - portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); - - BaseType_t xYieldCurrentTask; - - xYieldCurrentTask = xTaskUnlockCanYield(); - - /* If the critical nesting count is 0, enable interrupts */ - if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U ) - { - portENABLE_INTERRUPTS(); - - if( xYieldCurrentTask != pdFALSE - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - /* Yield only if no deferred state change is pending */ - && pxCurrentTCB->uxDeferredStateChange == 0U - #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - ) - { - portYIELD(); - } - } - } - } - } - -#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */ -/*-----------------------------------------------------------*/ - -#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - - BaseType_t xTaskUnlockCanYield( void ) - { - BaseType_t xReturn; - BaseType_t xCoreID = portGET_CORE_ID(); - - if( ( xYieldPendings[ xCoreID ] == pdTRUE ) && ( uxSchedulerSuspended == pdFALSE ) - #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) - && ( pxCurrentTCBs[ xCoreID ]->uxPreemptionDisable == 0U ) - #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */ - ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - - return xReturn; - } - -#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ -/*-----------------------------------------------------------*/ - #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) static char * prvWriteNameToBuffer( char * pcBuffer, diff --git a/timers.c b/timers.c index 
6408b65164..53097568cc 100644 --- a/timers.c +++ b/timers.c @@ -83,8 +83,8 @@ * Macros to mark the start and end of a critical code region. */ #if ( portUSING_GRANULAR_LOCKS == 1 ) - #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) - #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTaskSpinlock, &xISRSpinlock ) + #define tmrENTER_CRITICAL() taskDATA_GROUP_ENTER_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) + #define tmrEXIT_CRITICAL() taskDATA_GROUP_EXIT_CRITICAL( &xTimerTaskSpinlock, &xTimerISRSpinlock ) #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */ #define tmrENTER_CRITICAL() taskENTER_CRITICAL() #define tmrEXIT_CRITICAL() taskEXIT_CRITICAL() @@ -161,8 +161,13 @@ PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) - PRIVILEGED_DATA static portSPINLOCK_TYPE xTaskSpinlock = portINIT_SPINLOCK_STATIC; - PRIVILEGED_DATA static portSPINLOCK_TYPE xISRSpinlock = portINIT_SPINLOCK_STATIC; + #ifdef portREMOVE_STATIC_QUALIFIER + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #else + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerTaskSpinlock = portINIT_SPINLOCK_STATIC; + PRIVILEGED_DATA static portSPINLOCK_TYPE xTimerISRSpinlock = portINIT_SPINLOCK_STATIC; + #endif #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */ /*-----------------------------------------------------------*/ @@ -613,7 +618,15 @@ traceENTER_xTimerGetReloadMode( xTimer ); configASSERT( xTimer ); - tmrENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0U ) { @@ -626,7 +639,15 @@ xReturn = pdTRUE; } } - tmrEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_xTimerGetReloadMode( xReturn ); @@ -1188,7 +1209,15 @@ configASSERT( xTimer ); /* Is the timer in the list of active timers? */ - tmrENTER_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrENTER_CRITICAL(); + } + #else + { + portBASE_TYPE_ENTER_CRITICAL(); + } + #endif { if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0U ) { @@ -1199,7 +1228,15 @@ xReturn = pdTRUE; } } - tmrEXIT_CRITICAL(); + #if ( ( configNUMBER_OF_CORES > 1 ) ) + { + tmrEXIT_CRITICAL(); + } + #else + { + portBASE_TYPE_EXIT_CRITICAL(); + } + #endif traceRETURN_xTimerIsTimerActive( xReturn );
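Reviewer note: the sketch below is illustrative only and is not part of the diff above. Assuming portUSING_GRANULAR_LOCKS == 1 and configNUMBER_OF_CORES > 1, it shows how a kernel data group is expected to use the new task-level lock macros added to task.h, mirroring the way event_groups.c now maps event_groupsLOCK() and event_groupsUNLOCK_WITH_YIELD_STATUS() onto them. The MyGroup_t type and prvUpdateGroup() function are hypothetical placeholders.

#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical data group guarded by its own spinlock pair, following the
 * pattern EventGroup_t and StreamBuffer_t use in this change. */
typedef struct MyGroupDef
{
    UBaseType_t uxState;
    portSPINLOCK_TYPE xTaskSpinlock;
    portSPINLOCK_TYPE xISRSpinlock;
} MyGroup_t;

static void prvUpdateGroup( MyGroup_t * pxGroup )
{
    BaseType_t xAlreadyYielded = pdFALSE;

    /* Disable preemption for the calling task and take the group's task
     * spinlock; tasks on other cores are kept out while ISRs can still
     * pend access to the group. */
    taskDATA_GROUP_LOCK( &( pxGroup->xTaskSpinlock ) );
    {
        /* Non-deterministic work on the group's members goes here. */
        pxGroup->uxState++;
    }
    /* Release the task spinlock and re-enable preemption, capturing whether
     * a yield already occurred while preemption was being re-enabled. */
    taskDATA_GROUP_UNLOCK_WITH_YIELD_STATUS( &( pxGroup->xTaskSpinlock ), &xAlreadyYielded );

    /* The blocking paths in event_groups.c test xAlreadyYielded at this point
     * and call taskYIELD_WITHIN_API() only if no yield has happened yet. */
    ( void ) xAlreadyYielded;
}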
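A second illustrative fragment, under the same assumptions and reusing the hypothetical MyGroup_t from the sketch above: the ISR-side counterpart uses the data group FROM_ISR critical section macros that stream_buffer.c is switched to in this diff, taking only the group's ISR spinlock rather than its task spinlock. uxMyGroupGetStateFromISR() is likewise a hypothetical name.

static UBaseType_t uxMyGroupGetStateFromISR( MyGroup_t * pxGroup )
{
    UBaseType_t uxSavedInterruptStatus;
    UBaseType_t uxReturn;

    /* Mask interrupts, take only the group's ISR spinlock and increment the
     * critical nesting count for this core. */
    taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &( pxGroup->xISRSpinlock ), &uxSavedInterruptStatus );
    {
        uxReturn = pxGroup->uxState;
    }
    /* Release the ISR spinlock and restore the previous interrupt mask once
     * the critical nesting count for this core drops back to zero. */
    taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &( pxGroup->xISRSpinlock ) );

    return uxReturn;
}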